import streamlit as st
from dotenv import load_dotenv
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores.faiss import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os
from langchain_groq import ChatGroq
from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
from langchain.prompts import PromptTemplate
from bs4 import SoupStrainer
from youtube_transcript_api import YouTubeTranscriptApi, TranscriptsDisabled, NoTranscriptFound
import yt_dlp
import re
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
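# WebChatter: a Streamlit app that loads a web page or a YouTube transcript, summarizes it with a
# Groq-hosted Llama 3 model, and (for web pages only) answers questions over a FAISS index of the content.
# Assumed dependency list (a sketch of requirements.txt; exact package names and pins may differ):
#   streamlit, python-dotenv, langchain, langchain-community, langchain-groq, sentence-transformers,
#   faiss-cpu, beautifulsoup4, youtube-transcript-api, yt-dlp, google-api-python-client, google-auth-oauthlib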
# Load environment variables (optional)
load_dotenv()
# Groq API key (set GROQ_API_KEY in the environment, a .env file, or Hugging Face Spaces secrets; never hardcode secrets)
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
if not GROQ_API_KEY:
    st.error("GROQ_API_KEY is not set. Add it to your environment, .env file, or Spaces secrets.")
    st.stop()
# YouTube API key (to be set in Hugging Face Spaces secrets, optional if using OAuth)
YOUTUBE_API_KEY = os.getenv("YOUTUBE_API_KEY")
# Paths for stored OAuth credentials
CREDENTIALS_FILE = "youtube_credentials.json"
CLIENT_SECRETS_FILE = "client_secrets.json"
# Custom CSS
st.markdown("""
<style>
body {
background: linear-gradient(135deg, #1e3c72, #2a5298);
color: #ffffff;
font-family: 'Arial', sans-serif;
}
.stSidebar, .main .block-container {
background: rgba(255, 255, 255, 0.1);
border-radius: 15px;
backdrop-filter: blur(10px);
-webkit-backdrop-filter: blur(10px);
border: 1px solid rgba(255, 255, 255, 0.2);
box-shadow: 0 8px 32px rgba(0, 0, 0, 0.2);
padding: 20px;
}
.stTextInput > div > input {
background: rgba(255, 255, 255, 0.15);
color: #ffffff;
border: 1px solid rgba(255, 255, 255, 0.3);
border-radius: 10px;
padding: 10px;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
}
.stButton > button {
background: linear-gradient(45deg, #6b48ff, #00ddeb);
color: #ffffff;
border: none;
border-radius: 10px;
padding: 10px 20px;
font-weight: bold;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2);
transition: transform 0.2s;
}
.stButton > button:hover {
transform: translateY(-2px);
box-shadow: 0 6px 16px rgba(0, 0, 0, 0.3);
}
h1, h2, h3 {
color: #ffffff;
text-shadow: 0 2px 4px rgba(0, 0, 0, 0.2);
}
.stText {
color: #e0e0e0;
font-weight: bold;
}
.stAlert {
background: rgba(255, 50, 50, 0.2);
border: 1px solid rgba(255, 50, 50, 0.5);
border-radius: 10px;
color: #ffcccc;
}
.stAlert[role="alert"] > div {
background: rgba(255, 200, 0, 0.2);
border: 1px solid rgba(255, 200, 0, 0.5);
color: #fff5cc;
}
.stSpinner > div {
color: #00ddeb;
}
.footer {
display: flex;
align-items: center;
justify-content: center;
padding: 10px;
background: rgba(255, 255, 255, 0.1);
border-top: 1px solid rgba(255, 255, 255, 0.2);
position: fixed;
bottom: 0;
width: 100%;
color: #e0e0e0;
font-size: 14px;
}
.footer img {
margin-right: 10px;
}
</style>
""", unsafe_allow_html=True)
# Display large logo at the top of the main page
st.image("https://i.postimg.cc/2j0QWF3Z/Removal-575.png", width=390)
# Set Streamlit app title
st.title("WebChatter π¬")
# Initialize session state
if "url_content" not in st.session_state:
st.session_state.url_content = None
if "summary" not in st.session_state:
st.session_state.summary = None
if "vectorstore" not in st.session_state:
st.session_state.vectorstore = None
if "index_created" not in st.session_state:
st.session_state.index_created = False
if "content_type" not in st.session_state:
st.session_state.content_type = None
# Initialize LLM once at the start
if "llm" not in st.session_state:
st.session_state.llm = ChatGroq(
api_key=GROQ_API_KEY,
model="llama3-70b-8192",
max_tokens=512 # Keep reduced to minimize resource usage
)
# Sidebar for URL and YouTube input
with st.sidebar:
    st.header("Enter Web URL")
    url = st.text_input("URL", placeholder="e.g., https://mahatirtusher.com/astronomy-mythology/")
    process_url_clicked = st.button("Process URL")
    st.header("Enter YouTube URL")
    youtube_url = st.text_input("YouTube URL", placeholder="e.g., https://www.youtube.com/watch?v=DJO_9auJhJQ")
    process_youtube_clicked = st.button("Process YouTube Video")
# Main content container
main_container = st.container()
# Custom prompt for detailed answers (for web URLs only)
qa_prompt = PromptTemplate(
    input_variables=["context", "question"],
    template="""You are an expert assistant tasked with providing detailed, extensive, and comprehensive answers. Use the provided context to answer the question thoroughly, including explanations, examples, and additional relevant information. If the context is limited, expand on the topic with your own knowledge to ensure a complete response. When explaining anything, break the topic down and explain it step by step, using your own reasoning and knowledge where helpful. If the user asks a question in Bengali, answer it in clear, detailed Bengali.
Context: {context}
Question: {question}
Answer with sources: """
)
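# At query time, the retrieval chain fills {context} with the retrieved page chunks (it is wired up
# via document_variable_name="context" in create_qa_chain below) and {question} with the user's query.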
# Function to summarize content
def summarize_content(content, llm, is_youtube=False):
    if is_youtube:
        # Extensive summary for YouTube videos (15-20 sentences)
        summary_prompt = f"""You are an expert summarizer tasked with providing a very detailed and extensive summary of the following YouTube video transcript. Capture all key points, main ideas, and significant details in 15-20 sentences. Include specific examples, quotes, or moments from the transcript to make the summary comprehensive and vivid. Ensure the summary is well-organized, flowing naturally from one point to the next, and provides a thorough overview of the video's content.
Transcript: {content}
Extensive Summary: """
    else:
        # Shorter summary for web URLs (5-10 sentences)
        summary_prompt = f"""Summarize the following content in 5-10 sentences, capturing the main points and key details in plain language:
{content}
Summary: """
    summary = llm.invoke(summary_prompt).content
    return summary
# Function to extract YouTube video ID from URL
def get_video_id(url):
    if "youtube.com/watch?v=" in url:
        return url.split("v=")[1].split("&")[0]
    elif "youtu.be/" in url:
        return url.split("youtu.be/")[1].split("?")[0]
    return None
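# Illustrative examples (the video IDs are arbitrary):
#   get_video_id("https://www.youtube.com/watch?v=DJO_9auJhJQ&t=42") -> "DJO_9auJhJQ"
#   get_video_id("https://youtu.be/DJO_9auJhJQ?si=xyz")              -> "DJO_9auJhJQ"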
# Function to fetch YouTube transcript
def fetch_youtube_transcript(video_id):
    try:
        transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)
        # Try English variants first
        for lang in ['en', 'en-US', 'en-GB']:
            try:
                transcript = transcript_list.find_transcript([lang]).fetch()
                return " ".join([item['text'] for item in transcript])
            except NoTranscriptFound:
                continue
        # If no English transcript exists, try any translatable transcript and translate it to English
        for transcript in transcript_list:
            if transcript.is_translatable:
                translated_transcript = transcript.translate('en').fetch()
                return " ".join([item['text'] for item in translated_transcript])
        return None
    except TranscriptsDisabled:
        return None
    except Exception as e:
        st.error(f"Error fetching transcript with youtube-transcript-api: {str(e)}")
        return None
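# Transcript fallback order used by the YouTube handler below: youtube-transcript-api first,
# then yt-dlp with cookies, and finally the YouTube Data API (OAuth 2.0, or API key to list only).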
# Function to get YouTube API credentials
def get_youtube_credentials():
    creds = None
    if os.path.exists(CREDENTIALS_FILE):
        creds = Credentials.from_authorized_user_file(CREDENTIALS_FILE, scopes=['https://www.googleapis.com/auth/youtube.force-ssl'])
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            if os.path.exists(CLIENT_SECRETS_FILE):
                st.warning("Attempting to authenticate with the YouTube Data API. This may not work in Hugging Face Spaces due to redirect URI limitations.")
                flow = InstalledAppFlow.from_client_secrets_file(
                    CLIENT_SECRETS_FILE,
                    scopes=['https://www.googleapis.com/auth/youtube.force-ssl']
                )
                # This will fail in Hugging Face Spaces because it cannot open a browser
                creds = flow.run_local_server(port=0)
                with open(CREDENTIALS_FILE, 'w') as token_file:
                    token_file.write(creds.to_json())
            else:
                st.warning(
                    f"{CLIENT_SECRETS_FILE} not found. To use OAuth 2.0 for the YouTube Data API:\n"
                    "1. Go to https://console.developers.google.com/.\n"
                    "2. Create a project, enable YouTube Data API v3, and create OAuth 2.0 credentials.\n"
                    "3. Download the credentials as 'client_secrets.json'.\n"
                    "4. Run the app locally: pip install -r requirements.txt && streamlit run app.py\n"
                    "5. Authenticate via the browser prompt to generate youtube_credentials.json.\n"
                    "6. Upload youtube_credentials.json to your Hugging Face Space via the Files tab."
                )
                return None
    return creds
# Function to fetch captions using the YouTube Data API (OAuth 2.0, with API key fallback)
def fetch_youtube_captions_api(video_id, api_key=None):
    # First, try OAuth 2.0 if credentials are available
    creds = get_youtube_credentials()
    if creds:
        try:
            youtube = build('youtube', 'v3', credentials=creds)
            captions = youtube.captions().list(
                part='snippet',
                videoId=video_id
            ).execute()
            caption_id = None
            for item in captions.get('items', []):
                if item['snippet']['language'] in ['en', 'en-US', 'en-GB']:
                    caption_id = item['id']
                    break
            if not caption_id:
                st.warning("No English captions found via YouTube Data API.")
                return None
            # Download captions using OAuth 2.0 credentials
            caption_content = youtube.captions().download(
                id=caption_id,
                tfmt='srt'
            ).execute()
            # The response is a binary string; decode it
            caption_text = caption_content.decode('utf-8')
            # Parse the SRT format, skipping sequence numbers and timestamp lines
            lines = caption_text.split('\n')
            text_lines = []
            for line in lines:
                if line.strip() and not line.isdigit() and not re.match(r'\d{2}:\d{2}:\d{2},\d{3} --> \d{2}:\d{2}:\d{2},\d{3}', line):
                    text_lines.append(line.strip())
            return " ".join(text_lines)
        except HttpError as e:
            st.error(f"Error fetching captions with YouTube Data API (OAuth 2.0): {str(e)}")
            return None
    # Fall back to the API key if OAuth fails or credentials are not available
    if not api_key:
        st.warning("YOUTUBE_API_KEY not set and OAuth 2.0 credentials not available. Skipping YouTube Data API fallback.")
        return None
    try:
        youtube = build('youtube', 'v3', developerKey=api_key)
        captions = youtube.captions().list(
            part='snippet',
            videoId=video_id
        ).execute()
        caption_id = None
        for item in captions.get('items', []):
            if item['snippet']['language'] in ['en', 'en-US', 'en-GB']:
                caption_id = item['id']
                break
        if not caption_id:
            st.warning("No English captions found via YouTube Data API.")
            return None
        # Note: downloading captions requires OAuth 2.0; an API key can only list them
        st.warning(
            "English captions are available for this video but cannot be fetched with an API key alone. "
            "Downloading captions requires OAuth 2.0 authentication, which is not supported in Hugging Face Spaces without user interaction. "
            "To fetch captions:\n"
            "- Follow the instructions above to generate youtube_credentials.json locally and upload it.\n"
            "- Or try a video with transcripts available (e.g., https://www.youtube.com/watch?v=dQw4w9WgXcQ)."
        )
        return None
    except HttpError as e:
        st.error(f"Error fetching captions with YouTube Data API (API Key): {str(e)}")
        return None
# Function to extract subtitles using yt-dlp with cookies
def extract_subtitles_with_ytdlp(video_url):
    ydl_opts = {
        'writesubtitles': True,
        'writeautomaticsub': True,
        'subtitleslangs': ['all', '-live_chat'],
        'skip_download': True,
        'subtitlesformat': 'vtt',
        'outtmpl': 'subtitle.%(ext)s',
        'http_headers': {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
        },
        'cookiefile': 'cookies.txt',
    }
    try:
        if not os.path.exists('cookies.txt'):
            st.error(
                "cookies.txt file not found. Please upload a valid cookies.txt file to the root directory of your Space. "
                "To generate it:\n"
                "1. Open Chrome and log in to YouTube.\n"
                "2. Install the 'Export Cookies' extension (or use a tool like 'cookies.txt' for Firefox).\n"
                "3. Export cookies for 'youtube.com' and save them as 'cookies.txt'.\n"
                "4. Upload the file to your Space via the Files tab.\n"
                "Alternative: if this fails, test locally to rule out Spaces IP restrictions."
            )
            return None
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            info = ydl.extract_info(video_url, download=False)
            available_subs = info.get('subtitles', {})
            auto_subs = info.get('automatic_captions', {})
            subtitle_langs = list(available_subs.keys()) or list(auto_subs.keys())
            if not subtitle_langs:
                st.warning("No subtitles or auto-captions available in any language.")
                return None
            ydl.params['subtitleslangs'] = subtitle_langs
            ydl.download([video_url])
        subtitle_file = None
        for lang in subtitle_langs:
            possible_file = f"subtitle.{lang}.vtt"
            if os.path.exists(possible_file):
                subtitle_file = possible_file
                break
        if not subtitle_file:
            st.warning("No subtitle files were downloaded.")
            return None
        with open(subtitle_file, 'r', encoding='utf-8') as f:
            subtitle_text = f.read()
        os.remove(subtitle_file)
        # Strip WEBVTT headers, metadata, and timestamp lines, keeping only the caption text
        lines = subtitle_text.split('\n')
        text_lines = []
        for line in lines:
            if line.strip() and not line.startswith('WEBVTT') and not line.startswith('Kind:') and not line.startswith('Language:') and not re.match(r'\d{2}:\d{2}:\d{2}\.\d{3} --> \d{2}:\d{2}:\d{2}\.\d{3}', line):
                text_lines.append(line.strip())
        return " ".join(text_lines)
    except Exception as e:
        st.error(f"Error fetching captions with yt-dlp: {str(e)}")
        return None
# Function to process and chunk text (for web URLs only)
def process_content(text, embeddings, source):
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
        separators=["\n\n", "\n", ".", " ", ""]
    )
    docs = text_splitter.create_documents([text], metadatas=[{"source": source}])
    if not docs:
        st.error("No documents created from the content.")
        return None
    vectorstore = FAISS.from_documents(docs, embeddings)
    return vectorstore
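# Chunking trade-off: 1000-character chunks with 200 characters of overlap keep each retrieved
# passage small enough for the k=2 "stuff" chain below while preserving context across boundaries.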
# Function to create QA chain (for web URLs only)
def create_qa_chain(vectorstore, llm):
    if vectorstore is None:
        st.error("Vector store is not initialized. Cannot create QA chain.")
        return None
    retriever = vectorstore.as_retriever(search_kwargs={"k": 2})
    qa_chain = RetrievalQAWithSourcesChain.from_chain_type(
        llm=llm,
        retriever=retriever,
        chain_type="stuff",
        chain_type_kwargs={
            "prompt": qa_prompt,
            "document_variable_name": "context"
        }
    )
    return qa_chain
# Process Web URL
if process_url_clicked:
    with main_container:
        if not url.strip():
            st.error("Please provide a valid URL.")
        else:
            with st.spinner("Processing URL..."):
                try:
                    st.text("Data Loading...Started...✅✅✅")
                    parse_only = SoupStrainer(['title', 'p', 'h1', 'h2', 'h3'])
                    loader = WebBaseLoader(
                        web_paths=[url.strip()],
                        bs_kwargs={"parse_only": parse_only},
                        requests_kwargs={"headers": {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"}}
                    )
                    data = loader.load()
                    if not data or all(len(doc.page_content.strip()) == 0 for doc in data):
                        st.error("No content loaded from URL. Try a different URL (e.g., https://www.bbc.com/news/science-environment-67299122).")
                        st.stop()
                    # Initialize embeddings only when needed
                    if "embeddings" not in st.session_state:
                        st.session_state.embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
                    st.session_state.url_content = "\n".join([doc.page_content for doc in data])
                    embeddings = st.session_state.embeddings
                    st.session_state.vectorstore = process_content(st.session_state.url_content, embeddings, source=url.strip())
                    st.session_state.index_created = True
                    st.session_state.content_type = "web"
                    st.session_state.summary = None
                    # Drop any cached QA chain so questions run against the new vector store
                    st.session_state.qa_chain = None
                    st.text("Content processed successfully! ✅✅✅")
                except Exception as e:
                    st.error(f"Error processing URL: {str(e)}")
                    st.stop()
# Process YouTube Video
if process_youtube_clicked:
    with main_container:
        if not youtube_url.strip():
            st.error("Please provide a valid YouTube URL.")
        else:
            with st.spinner("Processing YouTube Video..."):
                try:
                    video_id = get_video_id(youtube_url)
                    if not video_id:
                        st.error("Invalid YouTube URL. Please provide a URL like https://www.youtube.com/watch?v=VIDEO_ID.")
                        st.stop()
                    st.text("Fetching Transcript...Started...✅✅✅")
                    transcript_text = fetch_youtube_transcript(video_id)
                    if not transcript_text:
                        st.warning("Transcripts are disabled or unavailable. Attempting to fetch closed captions...")
                        st.text("Fetching Closed Captions with yt-dlp...Started...✅✅✅")
                        transcript_text = extract_subtitles_with_ytdlp(youtube_url)
                    if not transcript_text:
                        st.text("Fetching Captions via YouTube Data API...Started...✅✅✅")
                        transcript_text = fetch_youtube_captions_api(video_id, YOUTUBE_API_KEY)
                    if not transcript_text:
                        st.error(
                            "No transcripts or closed captions available. "
                            "Possible reasons:\n"
                            "1. Captions are not enabled for this video.\n"
                            "2. YouTube detected this request as a bot (even with cookies.txt).\n"
                            "Solutions:\n"
                            "- Check that captions are enabled for the video on YouTube (gear icon > Subtitles/CC).\n"
                            "- Regenerate and upload a fresh cookies.txt file (see instructions above).\n"
                            "- Set up OAuth 2.0 credentials by following the instructions above to download captions directly.\n"
                            "- Try a different video (e.g., https://www.youtube.com/watch?v=dQw4w9WgXcQ, which has transcripts available).\n"
                            "- Test locally to rule out Hugging Face Spaces IP restrictions by running: pip install -r requirements.txt && streamlit run app.py"
                        )
                        st.stop()
                    if not transcript_text.strip():
                        st.error("Transcript or captions are empty. Try a different video.")
                        st.stop()
                    st.session_state.url_content = transcript_text
                    # No vector store for YouTube videos since we're not doing QA on them
                    st.session_state.vectorstore = None
                    st.session_state.index_created = False
                    st.session_state.content_type = "youtube"
                    st.session_state.summary = None
                    st.text("YouTube video processed successfully! ✅✅✅")
                except Exception as e:
                    st.error(f"Error processing YouTube video: {str(e)}")
                    st.stop()
# Summary button
with main_container:
    if st.session_state.url_content and st.button("Generate Summary"):
        with st.spinner("Generating summary..."):
            is_youtube = st.session_state.content_type == "youtube"
            st.session_state.summary = summarize_content(st.session_state.url_content, st.session_state.llm, is_youtube=is_youtube)
# Display the summary if generated
if st.session_state.summary:
    with main_container:
        st.header("Summary of the Content")
        st.write(st.session_state.summary)
# Query input with Ask button (only for web URLs)
if st.session_state.url_content and st.session_state.content_type == "web":
    with main_container:
        st.header("Ask a Question")
        query = st.text_input("Question", placeholder="e.g., What is the article about?")
        ask_clicked = st.button("Ask")
        if ask_clicked and query:
            with st.spinner("Processing your question..."):
                try:
                    if "qa_chain" not in st.session_state or st.session_state.qa_chain is None:
                        st.session_state.qa_chain = create_qa_chain(st.session_state.vectorstore, st.session_state.llm)
                    if st.session_state.qa_chain is None:
                        st.error("Failed to create QA chain.")
                        st.stop()
                    result = st.session_state.qa_chain({"question": query}, return_only_outputs=True)
                    if not result.get("answer"):
                        st.warning("No answer generated. Try a different question or content.")
                        st.stop()
                    st.header("Answer")
                    st.write(result["answer"])
                    sources = result.get("sources", "")
                    if sources:
                        st.subheader("Sources:")
                        for source in sources.split("\n"):
                            st.write(source)
                    else:
                        st.write("No sources found.")
                except Exception as e:
                    st.error(f"Error answering query: {str(e)}")
                    st.stop()
# Footer with tiny logo and text
st.markdown(
    """
    <div class="footer">
        <img src="https://i.postimg.cc/2j0QWF3Z/Removal-575.png" width="80">
        WebChatter © 2025 | Developed by Mahatir Ahmed Tusher
    </div>
    """,
    unsafe_allow_html=True
)