# ecom_Chat / gemini_client.py — thin client wrapper around the Google Gemini API.
import os
import google.generativeai as genai
from dotenv import load_dotenv
# Load environment variables from the .env file so GEMINI_API_KEY can be
# supplied without hard-coding it in source.
load_dotenv()
# Configure the Gemini API client; fail fast at import time if no key is set.
api_key = os.getenv("GEMINI_API_KEY")
if not api_key:
    raise ValueError("GEMINI_API_KEY not found in .env file or environment variables.")
genai.configure(api_key=api_key)
# Initialize the generative model.
# Using gemini-2.0-flash; check the Gemini documentation for other available models.
model = genai.GenerativeModel('gemini-2.0-flash')
def get_gemini_response(prompt):
    """
    Send a prompt to the Gemini API and return the generated text response.

    Args:
        prompt (str): The complete prompt including context and user query.

    Returns:
        str: The text response from the Gemini model, or an error message
            string (prefixed with "Error") if the call or extraction fails.
    """
    try:
        response = model.generate_content(prompt)
        # A response may contain several parts; join the text of all of them
        # so multi-part answers are not silently truncated to the first part.
        if response.parts:
            return "".join(
                part.text for part in response.parts if hasattr(part, "text")
            )
        elif hasattr(response, 'text'):
            # Fallback if 'text' attribute exists directly (older versions/different models)
            return response.text
        else:
            # No extractable text (e.g. the prompt was blocked by safety
            # filters, so candidates carry no content parts).
            print(f"Unexpected Gemini response structure: {response}")
            return "Error: Could not extract text from Gemini response."
    except Exception as e:
        # Broad catch is deliberate: this is the boundary to an external
        # service and callers expect a string, never an exception.
        print(f"An error occurred while calling the Gemini API: {e}")
        return f"Error communicating with AI assistant: {e}"
# Manual smoke test: run this module directly to exercise the client.
if __name__ == "__main__":
    print("Testing Gemini Client...")
    sample_query = "Explain what an e-commerce order status 'Shipped' means in simple terms."
    print(f"\nSending prompt: {sample_query}")
    reply = get_gemini_response(sample_query)
    print(f"\nGemini Response:\n{reply}")