ytrsoymr commited on
Commit
63f2829
·
verified ·
1 Parent(s): 2eb4c90

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -29
app.py CHANGED
@@ -1,26 +1,25 @@
1
- # app.py
2
-
3
  import os
4
  from dotenv import load_dotenv
5
  from tavily import TavilyClient
6
  from langchain_google_genai import ChatGoogleGenerativeAI
7
  from langchain.chains import LLMChain
8
  from langchain.prompts import PromptTemplate
 
9
 
10
- # 1. Load environment variables from .env file (if present)
11
  load_dotenv()
12
 
13
- # 2. Set API keys from environment
14
- GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
15
- TAVILY_API_KEY = os.environ.get("TAVILY_API_KEY")
16
 
17
- # 3. Initialize Gemini LLM
18
  llm = ChatGoogleGenerativeAI(
19
  model="models/gemini-1.5-flash",
20
  google_api_key=GOOGLE_API_KEY
21
  )
22
 
23
- # 4. Tavily Client
24
  tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
25
 
26
  def extract_website_text(url):
@@ -29,7 +28,7 @@ def extract_website_text(url):
29
  return result[0]["text"]
30
  return "Could not extract content from the URL."
31
 
32
- # 5. Prompt Template
33
  prompt = PromptTemplate(
34
  input_variables=["website_content", "question"],
35
  template="""
@@ -42,25 +41,19 @@ Answer the following question:
42
  """
43
  )
44
 
45
- # 6. LLM QA Chain
46
  qa_chain = LLMChain(llm=llm, prompt=prompt)
47
 
48
- # 7. Main Chat Function
49
- def ask_from_website(url, question):
50
- print(f"\n🔗 Extracting content from: {url}")
51
- website_text = extract_website_text(url)
52
-
53
- print(f"\n💬 Asking: {question}")
54
- response = qa_chain.invoke({
55
- "website_content": website_text,
56
- "question": question
57
- })
58
-
59
- print("\n✅ Answer:")
60
- print(response["text"])
61
-
62
- # 8. Run
63
- if __name__ == "__main__":
64
- url = input("Enter the website URL: ")
65
- question = input("What do you want to ask about this website? ")
66
- ask_from_website(url, question)
 
 
 
1
  import os
2
  from dotenv import load_dotenv
3
  from tavily import TavilyClient
4
  from langchain_google_genai import ChatGoogleGenerativeAI
5
  from langchain.chains import LLMChain
6
  from langchain.prompts import PromptTemplate
7
+ import streamlit as st
8
 
9
# Load environment variables from a local .env file (if present) so the
# API keys below can be supplied without exporting them in the shell.
load_dotenv()

# API keys — read from the environment. os.getenv returns None when a key
# is missing; a missing key surfaces later, at client call time.
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")

# LLM: Gemini 1.5 Flash via the LangChain Google GenAI wrapper.
llm = ChatGoogleGenerativeAI(
    model="models/gemini-1.5-flash",
    google_api_key=GOOGLE_API_KEY
)

# Tavily client — used by extract_website_text() below to pull page text.
tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
 
25
  def extract_website_text(url):
 
28
  return result[0]["text"]
29
  return "Could not extract content from the URL."
30
 
31
+ # Prompt
32
  prompt = PromptTemplate(
33
  input_variables=["website_content", "question"],
34
  template="""
 
41
  """
42
  )
43
 
 
44
# QA chain: feeds the website text + user question through the prompt
# template into the Gemini LLM.
# NOTE(review): LLMChain is deprecated in recent LangChain releases in
# favor of the `prompt | llm` runnable syntax — confirm the pinned version.
qa_chain = LLMChain(llm=llm, prompt=prompt)
45
 
46
# --- Streamlit UI ----------------------------------------------------------
# Collect a URL and a question, extract the page text with Tavily, then
# answer via the Gemini QA chain defined above.
st.title("🌐 WebQueryBot – Ask any website!")
url = st.text_input("Enter a website URL:")
question = st.text_area("What do you want to ask about the website?")

if st.button("Get Answer"):
    # Guard against blank inputs so we don't call the extractor or the LLM
    # with empty strings (previously the chain ran regardless).
    if not url.strip() or not question.strip():
        st.warning("Please provide both a website URL and a question.")
    else:
        with st.spinner("Extracting and generating answer..."):
            site_text = extract_website_text(url)
            result = qa_chain.invoke({
                "website_content": site_text,
                "question": question
            })
        st.subheader("✅ Answer")
        st.write(result["text"])