# WebQueryBot — Streamlit app: extracts a website's text with Tavily and
# answers questions about it using Gemini (via LangChain).
# --- Imports (stdlib, then third-party) ---------------------------------
import os

import streamlit as st
from dotenv import load_dotenv
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI
from tavily import TavilyClient

# --- Configuration -------------------------------------------------------
# Pull GOOGLE_API_KEY / TAVILY_API_KEY from a local .env file into the
# process environment so os.getenv below can see them.
load_dotenv()

GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")

# Gemini chat model that answers questions about the extracted page text.
llm = ChatGoogleGenerativeAI(
    model="models/gemini-1.5-flash",
    google_api_key=GOOGLE_API_KEY,
)

# Tavily client used to fetch/extract the raw text of a web page.
tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
def extract_website_text(url):
    """Return the extracted text content of *url* via Tavily.

    Parameters:
        url: The website URL to extract (a single URL string).

    Returns:
        The extracted raw text of the page, or a fallback message string
        when extraction yields nothing.
    """
    # Tavily's extract endpoint returns a dict of the shape
    # {"results": [{"url": ..., "raw_content": ...}, ...],
    #  "failed_results": [...]}
    # — not a bare list with a "text" key as the original code assumed,
    # so result[0]["text"] could never succeed and the function always
    # returned the fallback message.
    response = tavily_client.extract(urls=url)
    results = response.get("results", []) if isinstance(response, dict) else []
    if results and results[0].get("raw_content"):
        return results[0]["raw_content"]
    return "Could not extract content from the URL."
# Prompt that injects the scraped page text and the user's question.
QA_TEMPLATE = """
You are an intelligent assistant. Based on the following website content:
{website_content}
Answer the following question:
{question}
"""

prompt = PromptTemplate(
    template=QA_TEMPLATE,
    input_variables=["website_content", "question"],
)

# Chain wiring the Gemini model to the prompt above.
qa_chain = LLMChain(llm=llm, prompt=prompt)
# --- Streamlit UI --------------------------------------------------------
# NOTE(review): the "π" and "β" in the title/subheader literals look like
# mojibake'd emoji from an encoding round-trip — confirm the intended
# characters with the author; they are kept byte-identical here.
st.title("π WebQueryBot β Ask any website!")

url = st.text_input("Enter a website URL:")
question = st.text_area("What do you want to ask about the website?")

if st.button("Get Answer"):
    if not url.strip() or not question.strip():
        # Guard: don't call Tavily/Gemini with empty inputs — the original
        # code would hit the extractor with an empty URL and waste a call.
        st.warning("Please provide both a website URL and a question.")
    else:
        with st.spinner("Extracting and generating answer..."):
            site_text = extract_website_text(url)
            result = qa_chain.invoke({
                "website_content": site_text,
                "question": question,
            })
        st.subheader("β Answer")
        # LLMChain.invoke returns a dict; the generated answer is under "text".
        st.write(result["text"])