File size: 1,785 Bytes
ab3fb03
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
from openai import OpenAI
from dotenv import load_dotenv
import os
import requests
import PyPDF2


# --- Configuration: Gemini through its OpenAI-compatible endpoint ---
load_dotenv()
api_key = os.getenv("GEMINI_API_KEY")
model = "gemini-2.5-flash-lite"
base_url = "https://generativelanguage.googleapis.com/v1beta/openai/"
client = OpenAI(base_url=base_url, api_key=api_key)

# --- Download the company-profile PDF next to this script ---
url = "https://raw.githubusercontent.com/hereandnowai/sathyabama-be-cse-ai-pt1-07-2025-hands-on-professional-training-on-genai-and-ai-agents/main/general-profile-of-hereandnowai.pdf"
# timeout: never hang indefinitely on a dead host; raise_for_status: fail
# loudly instead of silently saving an HTML error page as a "PDF".
response = requests.get(url, timeout=30)
response.raise_for_status()

PDF_FILE_NAME = "profile-of-hereandnowai.pdf"
PDF_DIR = os.path.dirname(__file__)
PDF_PATH = os.path.join(PDF_DIR, PDF_FILE_NAME)

with open(PDF_PATH, "wb") as f:
    f.write(response.content)

# --- Extract text from every page; fall back to an error string on failure ---
try:
    with open(PDF_PATH, "rb") as file:
        reader = PyPDF2.PdfReader(file)
        # Keep only pages where extract_text() returned something non-empty.
        pdf_text_chunks = [
            page_text.strip()
            for page in reader.pages
            if (page_text := page.extract_text())
        ]
    pdf_context = "\n".join(pdf_text_chunks) if pdf_text_chunks else "Error extracting text from PDF file."
except Exception as e:
    print(f"Error reading the PDF file: {e}")
    pdf_context = "Error extracting text from PDF file."



def get_response(message, history):
    """Answer *message* grounded only in the downloaded PDF's text.

    Args:
        message: The user's question.
        history: Prior chat turns as a list of ``{"role", "content"}``
            dicts (OpenAI/Gradio "messages" format).

    Returns:
        The model's reply text (str).
    """
    # The context and the grounding instruction live in the system prompt.
    # The question itself is sent once, as the user turn below — the
    # original embedded it in the system prompt too (and misspelled
    # "Question"), duplicating it in every request.
    system_prompt = (
        f"context from {PDF_PATH}:\n{pdf_context}"
        "\n\nAnswer based only on the context."
    )
    messages = [{"role": "system", "content": system_prompt}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})
    response = client.chat.completions.create(model=model, messages=messages)
    return response.choices[0].message.content

print(get_response("who is the ceo of here and now ai?", []))