# main.py — FastAPI service for chatting with files/URLs, OCR, and audio transcription.
# (Author: Ali Abdullah; commit 4adcefb "Update main.py" — metadata preserved from the
# original paste as a comment so the file remains valid Python.)
# Standard library
import io
import os
from functools import lru_cache

# Third-party
import pandas as pd
import PyPDF2
import pytesseract
import whisper
from docx import Document
from dotenv import load_dotenv
from fastapi import FastAPI, UploadFile, File, Form
from fastapi.responses import JSONResponse
from groq import Groq
from langchain_community.document_loaders import WebBaseLoader
from PIL import Image
from pydantic import BaseModel
# Load environment variables from a local .env file (GROQ_API_KEY, plus optional
# TESSERACT_CMD / FFMPEG_PATH overrides used below).
load_dotenv()
# Set paths for OCR & audio: point pytesseract at the Tesseract binary and make
# sure the ffmpeg directory (required by Whisper for decoding) is on PATH.
pytesseract.pytesseract.tesseract_cmd = os.getenv("TESSERACT_CMD", "/usr/bin/tesseract")
os.environ["PATH"] += os.pathsep + os.getenv("FFMPEG_PATH", "/usr/bin")
app = FastAPI()
# Groq chat-completions client; requires GROQ_API_KEY in the environment.
client = Groq(api_key=os.getenv("GROQ_API_KEY"))
# Uploaded files are persisted here before text extraction / transcription.
UPLOAD_DIR = "uploaded_files"
os.makedirs(UPLOAD_DIR, exist_ok=True)
MAX_FILE_SIZE_MB = 10  # upload size cap (enforced per-endpoint)
def extract_text_from_file(file_path):
    """Return the text content of *file_path*, dispatching on its extension.

    Supports .txt, .docx, .csv and .pdf. Any other extension returns an
    error string rather than raising, because callers feed the result
    straight into the LLM prompt.

    Args:
        file_path: Path to a file on local disk.

    Returns:
        The extracted text, or "❌ Unsupported file type." for unknown extensions.
    """
    ext = os.path.splitext(file_path)[-1].lower()
    if ext == ".txt":
        with open(file_path, "r", encoding="utf-8") as f:
            return f.read()
    if ext == ".docx":
        doc = Document(file_path)
        return "\n".join(p.text for p in doc.paragraphs)
    if ext == ".csv":
        df = pd.read_csv(file_path)
        return df.to_string(index=False)
    if ext == ".pdf":
        with open(file_path, "rb") as f:
            reader = PyPDF2.PdfReader(f)
            # Extract each page exactly once. The original called
            # page.extract_text() twice per page (once in the filter, once in
            # the join), doubling the most expensive step of PDF parsing.
            page_texts = (page.extract_text() for page in reader.pages)
            return "\n".join(t for t in page_texts if t)
    return "❌ Unsupported file type."
@app.post("/chat-with-file")
async def chat_with_file(file: UploadFile = File(...), question: str = Form(...)):
    """Answer *question* using text extracted from the uploaded file.

    Rejects uploads larger than MAX_FILE_SIZE_MB with a 400; any other
    failure is reported as a 500 with the exception message.
    """
    try:
        contents = await file.read()
        if len(contents) > MAX_FILE_SIZE_MB * 1024 * 1024:
            return JSONResponse(status_code=400, content={"error": "❌ File too large. Max 10MB."})
        # SECURITY: file.filename is client-controlled. basename() strips any
        # directory components, preventing path traversal such as
        # "../../etc/cron.d/evil" escaping UPLOAD_DIR.
        safe_name = os.path.basename(file.filename or "upload")
        path = os.path.join(UPLOAD_DIR, safe_name)
        with open(path, "wb") as f:
            f.write(contents)
        text = extract_text_from_file(path)
        response = client.chat.completions.create(
            model="llama3-8b-8192",
            messages=[
                {"role": "system", "content": "You are a helpful assistant. Answer using the uploaded file."},
                {"role": "user", "content": f"{text}\n\nQuestion: {question}"},
            ],
        )
        return {"answer": response.choices[0].message.content}
    except Exception as e:
        return JSONResponse(status_code=500, content={"error": str(e)})
class URLQuery(BaseModel):
    """Request body for /chat-with-url: the page to fetch and the question to answer."""
    url: str
    question: str
@app.post("/chat-with-url")
async def chat_with_url(data: URLQuery):
    """Fetch the page at ``data.url`` and answer ``data.question`` from its content.

    Loads the page via LangChain's WebBaseLoader (with a browser-like
    User-Agent so basic bot filters don't block the request), concatenates
    all retrieved documents, and asks the Groq model to answer using that
    text. Errors surface as a 500 JSON response.
    """
    try:
        loader = WebBaseLoader(data.url, header_template={"User-Agent": "Mozilla/5.0"})
        page_text = "\n".join(doc.page_content for doc in loader.load())
        completion = client.chat.completions.create(
            model="llama3-8b-8192",
            messages=[
                {"role": "system", "content": "You are a helpful assistant. Answer using the webpage content."},
                {"role": "user", "content": f"Web Content:\n{page_text}\n\nQuestion: {data.question}"},
            ],
        )
        return {"answer": completion.choices[0].message.content}
    except Exception as exc:
        return JSONResponse(status_code=500, content={"error": str(exc)})
@app.post("/extract-text-from-image")
async def extract_text_from_image(file: UploadFile = File(...)):
    """OCR the uploaded image with Tesseract and return the extracted text.

    Enforces the same MAX_FILE_SIZE_MB cap as /chat-with-file (the original
    endpoint accepted unbounded uploads, inconsistent with the rest of the API).
    """
    try:
        contents = await file.read()
        if len(contents) > MAX_FILE_SIZE_MB * 1024 * 1024:
            return JSONResponse(status_code=400, content={"error": "❌ File too large. Max 10MB."})
        # Grayscale conversion plus a 2x upscale before OCR — preprocessing
        # intended to help Tesseract on small text.
        image = Image.open(io.BytesIO(contents)).convert("L")
        image = image.resize((image.width * 2, image.height * 2))
        text = pytesseract.image_to_string(image, lang="eng")
        return {"answer": text.strip() or "⚠️ No text extracted."}
    except Exception as e:
        return JSONResponse(status_code=500, content={"error": str(e)})
@lru_cache(maxsize=1)
def _get_whisper_model():
    # Loading the Whisper "base" weights is expensive; the original reloaded
    # them on EVERY request. Cache the model once and reuse it across calls.
    return whisper.load_model("base")

@app.post("/transcribe-audio")
async def transcribe_audio(file: UploadFile = File(...)):
    """Transcribe the uploaded audio file with Whisper and return the text.

    The file is saved under UPLOAD_DIR (Whisper/ffmpeg need a path on disk)
    before transcription; failures are reported as a 500 JSON response.
    """
    try:
        contents = await file.read()
        # SECURITY: basename() strips client-supplied directory components,
        # preventing path traversal out of UPLOAD_DIR via a crafted filename.
        path = os.path.join(UPLOAD_DIR, os.path.basename(file.filename or "audio"))
        with open(path, "wb") as f:
            f.write(contents)
        result = _get_whisper_model().transcribe(path)
        return {"answer": result.get("text", "⚠️ No transcript returned.")}
    except Exception as e:
        return JSONResponse(status_code=500, content={"error": str(e)})