# NOTE(review): removed scraped page chrome ("Spaces:", "Build error",
# file-size line, and a column of line numbers) that was pasted in above
# the source — it was not part of the program and broke the file.
import os
import shutil
import tempfile
import textwrap

import streamlit as st
from langchain.chains.question_answering import load_qa_chain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain_community.document_loaders import WebBaseLoader
from langchain_openai import ChatOpenAI
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
# OpenAI API key must come from the environment. The original
# `os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')` was a no-op
# when the key was set and raised TypeError (os.environ rejects None) when
# it was not — guard it and fail with a readable message instead.
_openai_key = os.getenv("OPENAI_API_KEY")
if not _openai_key:
    st.error("OPENAI_API_KEY environment variable is not set.")
    st.stop()
os.environ["OPENAI_API_KEY"] = _openai_key

# Streamlit UI
st.title("🔍 AI Benefits Analysis for Any Company")

# User input: Only Website URL (with placeholder)
website_url = st.text_input("Enter Website URL", placeholder="e.g., https://www.companywebsite.com")

# Fixed question driving the retrieval-augmented analysis below.
fixed_question = (
"Analyze how Artificial Intelligence (AI) can benefit this company based on its industry, "
"key operations, and challenges. Provide insights on AI-driven improvements in customer experience, "
"automation, sales, risk management, decision-making, and innovation. Include an AI implementation roadmap, "
"challenges, solutions, and future opportunities with real-world examples."
)

# Temporary directory to store the FAISS index between steps of a run.
temp_dir = tempfile.gettempdir()
faiss_db_path = os.path.join(temp_dir, "faiss_index_dir")
def build_embeddings(url):
    """Fetch a web page, chunk it, embed it, and persist a FAISS index.

    The index is saved under ``faiss_db_path`` (replacing any previous
    run's index) and the in-memory vector store is returned.
    """
    st.info("Fetching and processing website data...")

    # Pull the page content down as LangChain documents.
    documents = WebBaseLoader(url).load()

    # Break the page into overlapping chunks suitable for embedding.
    splitter = CharacterTextSplitter(separator='\n', chunk_size=500, chunk_overlap=50)
    chunks = splitter.split_documents(documents)

    # Embed the chunks and build the vector store.
    store = FAISS.from_documents(chunks, OpenAIEmbeddings())

    # Persist the index, wiping whatever an earlier run left behind.
    if os.path.exists(faiss_db_path):
        shutil.rmtree(faiss_db_path)
    os.makedirs(faiss_db_path)
    store.save_local(faiss_db_path)

    return store
def save_text_to_pdf(text, file_path):
    """Render *text* as a multi-page letter-size PDF at *file_path*.

    Fixes over the original: long lines are wrapped so they do not run off
    the right edge of the page, and a new page is started when the text
    cursor reaches the bottom margin (the original drew everything onto a
    single page, silently losing overflow).

    Args:
        text: The report body; newlines delimit paragraphs/lines.
        file_path: Destination path for the generated PDF.
    """
    c = canvas.Canvas(file_path, pagesize=letter)
    width, height = letter

    # Report title on the first page.
    c.setFont("Helvetica-Bold", 16)
    c.drawString(30, height - 50, "AI Benefits Analysis Report")

    text_object = c.beginText(30, height - 80)
    text_object.setFont("Helvetica", 12)

    for raw_line in text.split("\n"):
        # Wrap to ~95 chars so 12pt Helvetica stays inside the page width;
        # keep blank source lines as blank output lines.
        for line in textwrap.wrap(raw_line, width=95) or [""]:
            # Flush and start a fresh page once the cursor nears the bottom.
            if text_object.getY() < 50:
                c.drawText(text_object)
                c.showPage()
                text_object = c.beginText(30, height - 50)
                text_object.setFont("Helvetica", 12)
            text_object.textLine(line)

    c.drawText(text_object)
    c.save()
# Run the full pipeline in one click (a URL is required).
if st.button("Get AI Insights") and website_url:
    docsearch = build_embeddings(website_url)

    # AI Benefits Analysis: retrieve relevant chunks and ask the model.
    st.subheader("💬 AI Benefits Analysis")
    chain = load_qa_chain(ChatOpenAI(model="gpt-4o"), chain_type="stuff")
    docs = docsearch.similarity_search(fixed_question)
    response = chain.run(input_documents=docs, question=fixed_question)
    st.write("**AI Insights:**", response)

    # Render the insights to a temp PDF, read its bytes for the download
    # button, then delete it — the original leaked one delete=False temp
    # file per click and kept the handle open while reportlab rewrote the
    # path (which fails on Windows).
    pdf_file = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
    pdf_file.close()
    try:
        save_text_to_pdf(response, pdf_file.name)
        with open(pdf_file.name, "rb") as f:
            pdf_bytes = f.read()
    finally:
        os.unlink(pdf_file.name)

    # Offer the generated report as a download.
    st.download_button(
        label="Download AI Insights as PDF File",
        data=pdf_bytes,
        file_name="ai_benefits_analysis_report.pdf",
        mime="application/pdf",
    )