Spaces:
Sleeping
Sleeping
| # program.py | |
| # AI HR Chatbot Backend for Caramel AI | |
| import os | |
| import requests | |
| import google.generativeai as genai | |
| from dotenv import load_dotenv | |
| import PyPDF2 # <-- Add this import | |
# --- SETUP ---
# Load the Gemini API key from a local .env file and fail fast if it is absent.
load_dotenv()
api_key = os.getenv("GEMINI_API_KEY")
if not api_key:
    raise ValueError("GEMINI_API_KEY not found. Please create a .env file with your API key.")
genai.configure(api_key=api_key)

# Fetch the workplace-policy knowledge base at import time. On any network/HTTP
# failure, fall back to a placeholder string so the module still imports and the
# chatbot degrades gracefully instead of crashing.
try:
    print("Fetching knowledge base from URL...")
    txt_url = "https://raw.githubusercontent.com/hereandnowai/vac/refs/heads/master/prospectus-context.txt"
    # timeout prevents module import from hanging indefinitely on a stalled connection
    response = requests.get(txt_url, timeout=30)
    response.raise_for_status()
    # Normalize the text: strip each line and drop blank lines.
    text_lines = response.text.splitlines()
    text_context = "\n".join([line.strip() for line in text_lines if line.strip()])
    print("✅ Knowledge base loaded successfully.")
except requests.RequestException as e:
    print(f"[ERROR loading context from URL] {e}")
    text_context = "No text context available."
# System prompt: pins the assistant's persona and restricts answers to the
# policy context interpolated below (text_context is set at module import).
system_prompt = f"""
You are Caramel AI, a fair and approachable Human Resources Manager.
Your mission is to explain workplace policies, provide general advice on employee relations,
and answer questions about recruitment and professional development.
Always promote a positive and inclusive work environment.
⚡ Important:
- Your first response in any new conversation must be to introduce yourself as:
“Caramel AI – AI Human Resource Manager, built at HERE AND NOW AI – Artificial Intelligence Research Institute.”
- You must use ONLY the provided Workplace Policy Context to answer questions. If the answer
is not in the context, say "I'm sorry, but that information is not available in my knowledge base."
--- Workplace Policy Context ---
{text_context}
"""

# Shared model instance: the system prompt is baked in so every call to
# generate_content() is grounded in the policy context.
model = genai.GenerativeModel(
    model_name="gemini-1.5-flash",
    system_instruction=system_prompt
)
| # --- CORE FUNCTION --- | |
| # (This existing function is unchanged) | |
def get_response(message: str, history: list) -> str:
    """
    Send the latest user message, with prior turns, to the Gemini model.

    Args:
        message: The newest user message.
        history: Earlier conversation turns in Gemini's role/parts format.

    Returns:
        The model's reply text (stripped), or a user-facing error string
        if the API call or response extraction fails.
    """
    turns = [*history, {"role": "user", "parts": [message]}]
    try:
        # .text access stays inside the try: it can raise on blocked responses.
        reply = model.generate_content(turns)
        return reply.text.strip()
    except Exception as e:
        print(f"Error during API call: {e}")
        return f"⚠️ I'm sorry, I encountered an error. Please try again. Error: {e}"
| # --- NEW FEATURE: FILE PROCESSING FUNCTION --- | |
def extract_text_from_file(file_obj):
    """
    Extract text from an uploaded file object (supports .txt and .pdf).

    Args:
        file_obj: An object exposing a ``.name`` path attribute
            (e.g. from Gradio's gr.File component), or None.

    Returns:
        A string containing the extracted text, or an "Error: ..." message
        (this function never raises; failures are reported in the return value).
    """
    if file_obj is None:
        return "Error: No file object received."

    file_path = file_obj.name
    print(f"Processing file: {file_path}")
    try:
        if file_path.lower().endswith('.txt'):
            with open(file_path, 'r', encoding='utf-8') as f:
                return f.read()
        elif file_path.lower().endswith('.pdf'):
            reader = PyPDF2.PdfReader(file_path)
            # Extract each page exactly once (the original called extract_text()
            # twice per page); skip pages with no text (e.g. scanned images).
            text_chunks = [text for page in reader.pages if (text := page.extract_text())]
            return "\n".join(text_chunks)
        else:
            return "Error: Unsupported file type. Please upload a .txt or .pdf file."
    except Exception as e:
        print(f"Error processing file {file_path}: {e}")
        return f"Error: Could not process the file. It may be corrupted. Details: {e}"