Spaces:
Sleeping
Sleeping
File size: 1,088 Bytes
3107242 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 |
import os
from dotenv import load_dotenv
import logging
from langchain_google_genai import ChatGoogleGenerativeAI
# Module-import side effect: populate os.environ (incl. GEMINI_API_KEY) from
# the project env file so GeminiWrapper.__init__ can read the key.
load_dotenv(dotenv_path="src/configs/.env")
class GeminiWrapper:
    """Thin convenience wrapper around LangChain's ``ChatGoogleGenerativeAI``.

    Reads the API key from the ``GEMINI_API_KEY`` environment variable
    (loaded from ``src/configs/.env`` at module import time).
    """

    def __init__(self, model_name: str = "gemini-2.5-flash", temperature: float = 0):
        """Create the underlying chat model.

        Args:
            model_name: Gemini model identifier passed through to LangChain.
            temperature: Sampling temperature; 0 for deterministic output.
        """
        self.model_name = model_name
        self.llm = ChatGoogleGenerativeAI(
            model=model_name,
            temperature=temperature,
            google_api_key=os.getenv("GEMINI_API_KEY")
        )

    def generate(self, prompt: str, history=None) -> str:
        """Generate a response for ``prompt``, optionally prefixed with history.

        Args:
            prompt: The user's message.
            history: Optional prior-conversation context. It is interpolated
                as-is ahead of the prompt.
                NOTE(review): the dead code previously here suggests history
                was once a list of (user, assistant) pairs; callers are now
                expected to pass a pre-formatted string — confirm at call sites.

        Returns:
            The model's reply text, or the fallback error string on failure.
        """
        if history:
            # Prepend the conversation so the model sees the context.
            prompt = f"{history}\nUser: {prompt}"
        return self._generate(prompt)

    def _generate(self, prompt: str) -> str:
        """Invoke the model once; never raises.

        Returns the reply content, or a fixed error string if the call fails.
        """
        try:
            return self.llm.invoke(prompt).content
        except Exception:
            # logging.exception records the full traceback (logging.error with
            # an f-string did not) and defers formatting to the logging layer.
            logging.exception("Gemini generation failed")
            return "Error generating response."

    def bind_tools(self, tools):
        """Delegate tool binding to the underlying LangChain model."""
        return self.llm.bind_tools(tools)
|