File size: 1,537 Bytes
1f33f1c
bf2dd99
 
1f33f1c
bf2dd99
 
63f2829
1f33f1c
63f2829
bf2dd99
1f33f1c
63f2829
 
 
bf2dd99
63f2829
1f33f1c
 
bf2dd99
1f33f1c
 
63f2829
bf2dd99
 
 
 
 
 
 
 
63f2829
bf2dd99
 
 
 
 
 
1f33f1c
bf2dd99
 
 
1f33f1c
 
bf2dd99
 
63f2829
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
import os
from dotenv import load_dotenv
from tavily import TavilyClient
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
import streamlit as st

# Load .env
load_dotenv()

# API keys
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")

# LLM
llm = ChatGoogleGenerativeAI(
    model="models/gemini-1.5-flash",
    google_api_key=GOOGLE_API_KEY
)

# Tavily
tavily_client = TavilyClient(api_key=TAVILY_API_KEY)

def extract_website_text(url):
    """Fetch the readable text content of *url* via the Tavily extract API.

    Args:
        url: Website URL (string) to extract content from.

    Returns:
        The extracted page text, or the fallback message
        "Could not extract content from the URL." when extraction fails
        or yields nothing.
    """
    try:
        response = tavily_client.extract(urls=url)
    except Exception:
        # Network/auth/API failures from Tavily should surface as the same
        # user-facing fallback message rather than crashing the Streamlit app.
        return "Could not extract content from the URL."

    # TavilyClient.extract returns a dict of the form
    # {"results": [{"url": ..., "raw_content": ...}, ...], "failed_results": [...]}.
    # The original code indexed the response like a list (result[0]) and read a
    # non-existent "text" key, so it always raised or fell through.
    results = response.get("results", []) if isinstance(response, dict) else response
    if results:
        first = results[0]
        # Prefer the documented "raw_content" key; keep "text" as a defensive
        # fallback in case of an older/alternative client response shape.
        content = first.get("raw_content") or first.get("text")
        if content:
            return content
    return "Could not extract content from the URL."

# Prompt
prompt = PromptTemplate(
    input_variables=["website_content", "question"],
    template="""
You are an intelligent assistant. Based on the following website content:

{website_content}

Answer the following question:
{question}
"""
)

qa_chain = LLMChain(llm=llm, prompt=prompt)

# Streamlit UI: collect a URL and a question, then run extract + LLM on click.
st.title("🌐 WebQueryBot – Ask any website!")
url = st.text_input("Enter a website URL:")
question = st.text_area("What do you want to ask about the website?")

if st.button("Get Answer"):
    # Guard against empty inputs before hitting the extract/LLM APIs —
    # previously blank strings were sent straight through.
    if not url or not question:
        st.warning("Please provide both a website URL and a question.")
    else:
        with st.spinner("Extracting and generating answer..."):
            site_text = extract_website_text(url)
            result = qa_chain.invoke({
                "website_content": site_text,
                "question": question
            })
            st.subheader("✅ Answer")
            # LLMChain.invoke returns a dict; the generated answer is under "text".
            st.write(result["text"])