# NOTE(review): the lines below are web-scrape residue that was fused into this
# file ("Spaces:", "Build error" banners, a file-size line, commit hashes, and a
# line-number gutter). They are not valid Python and broke the import of this
# module; preserved here as a comment for provenance.
# Spaces: | Build error | Build error | File size: 4,680 Bytes
# bef21a8 7c31854 bef21a8 7c31854 bef21a8 7c31854 bef21a8 7c31854 bef21a8
# 1 2 3 ... 138 (original line-number gutter)
import streamlit as st
import tempfile
import os
import shutil
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain_community.document_loaders import WebBaseLoader
from langchain.chains.question_answering import load_qa_chain
from langchain_openai import ChatOpenAI
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
from reportlab.pdfbase.pdfmetrics import stringWidth
# OpenAI API key: read from the environment. The original line
# `os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')` raised
# TypeError whenever the variable was unset (os.environ values must be str,
# never None) — guard and surface a clear UI error instead.
_api_key = os.getenv('OPENAI_API_KEY')
if _api_key:
    os.environ['OPENAI_API_KEY'] = _api_key
else:
    st.error("OPENAI_API_KEY environment variable is not set.")

# Streamlit UI
st.title("🔍 AI Benefits Analysis for Any Company")

# User input: only the website URL (with a placeholder example)
website_url = st.text_input("Enter Website URL", placeholder="e.g., https://www.companywebsite.com")

# Fixed question driving the AI analysis
fixed_question = (
    "Analyze how Artificial Intelligence (AI) can benefit this company based on its industry, "
    "key operations, and challenges. Provide insights on AI-driven improvements in customer experience, "
    "automation, sales, risk management, decision-making, and innovation. Include an AI implementation roadmap, "
    "challenges, solutions, and future opportunities with real-world examples."
)

# Temporary directory used to persist the FAISS index between steps
temp_dir = tempfile.gettempdir()
faiss_db_path = os.path.join(temp_dir, "faiss_index_dir")
# Function to fetch and process website data
def build_embeddings(url):
    """Fetch a website, chunk its text, embed it, and persist a FAISS index.

    Parameters
    ----------
    url : str
        Website URL to load and analyze.

    Returns
    -------
    The in-memory FAISS vector store built from the page content. The index
    is also saved to ``faiss_db_path``, replacing any previous copy.
    """
    st.info("Fetching and processing website data...")

    # Download the page content as LangChain documents
    documents = WebBaseLoader(url).load()

    # Split the fetched text into overlapping chunks for embedding
    splitter = CharacterTextSplitter(separator='\n', chunk_size=500, chunk_overlap=50)
    chunks = splitter.split_documents(documents)

    # Embed the chunks and build the vector store
    vector_store = FAISS.from_documents(chunks, OpenAIEmbeddings())

    # Persist the index on disk, wiping any stale copy first
    if os.path.exists(faiss_db_path):
        shutil.rmtree(faiss_db_path)
    os.makedirs(faiss_db_path)
    vector_store.save_local(faiss_db_path)

    return vector_store
# Function to save text to a PDF file
def save_text_to_pdf(text, file_path):
    """Render *text* into a simple single-column PDF report at *file_path*.

    The text is word-wrapped to the printable width and flows across as many
    letter-sized pages as needed, below a bold title on the first page.

    Parameters
    ----------
    text : str
        Report body; newlines mark logical line / paragraph breaks.
    file_path : str
        Destination path for the generated PDF.
    """
    c = canvas.Canvas(file_path, pagesize=letter)
    width, height = letter

    # Page margins and the resulting usable text width
    margin_x = 50
    margin_y = 50
    max_width = width - 2 * margin_x

    # Title
    c.setFont("Helvetica-Bold", 16)
    c.drawString(margin_x, height - margin_y, "AI Benefits Analysis Report")

    # Move the cursor below the title and switch to the body font
    y_position = height - margin_y - 30
    body_font, body_size = "Helvetica", 12
    c.setFont(body_font, body_size)

    def wrap_text(line, font_name, font_size, max_width):
        """Greedy word-wrap of one logical line to fit within max_width.

        Fix over the original: a blank input line now yields a single empty
        output line, so paragraph breaks survive in the PDF (previously
        ``wrap_text("")`` returned ``[]`` and blank lines vanished).
        NOTE(review): a single word wider than max_width is still emitted on
        its own (overflowing) line — same behavior as before.
        """
        words = line.split()
        if not words:
            return [""]  # preserve blank lines as vertical spacing
        lines = []
        current_line = words[0]
        for word in words[1:]:
            candidate = current_line + " " + word
            if stringWidth(candidate, font_name, font_size) <= max_width:
                current_line = candidate
            else:
                lines.append(current_line)
                current_line = word
        lines.append(current_line)
        return lines

    # Wrap every logical line of the input
    wrapped_lines = []
    for line in text.split("\n"):
        wrapped_lines.extend(wrap_text(line, body_font, body_size, max_width))

    # Emit the wrapped lines, starting a fresh page at the bottom margin
    for line in wrapped_lines:
        if y_position < margin_y:
            c.showPage()
            c.setFont(body_font, body_size)  # fonts reset on a new page
            y_position = height - margin_y
        c.drawString(margin_x, y_position, line)
        y_position -= 16  # line spacing

    c.save()
# Run everything in one click
# Run the full pipeline in one click: embed the site, query it, render a PDF.
if st.button("Get AI Insights") and website_url:
    docsearch = build_embeddings(website_url)

    # AI benefits analysis over the most relevant chunks
    st.subheader("💬 AI Benefits Analysis")
    chain = load_qa_chain(ChatOpenAI(model="gpt-4o"), chain_type="stuff")
    docs = docsearch.similarity_search(fixed_question)
    response = chain.run(input_documents=docs, question=fixed_question)
    st.write("**AI Insights:**", response)

    # Save the AI insights as a PDF. Close the temp-file handle immediately:
    # reportlab reopens the path itself, and reopening a still-open
    # NamedTemporaryFile fails on Windows.
    pdf_file = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
    pdf_file.close()
    save_text_to_pdf(response, pdf_file.name)

    # Offer the generated PDF for download, then remove the temp file so
    # repeated clicks don't leak one PDF per run (delete=False otherwise
    # leaves them behind forever).
    try:
        with open(pdf_file.name, "rb") as f:
            st.download_button(
                label="Download AI Insights as PDF File",
                data=f,
                file_name="ai_benefits_analysis_report.pdf",
                mime="application/pdf"
            )
    finally:
        os.unlink(pdf_file.name)