Nicolás Larenas committed on
Update ai_model.py
Browse files- ai_model.py +11 -70
ai_model.py
CHANGED
|
@@ -1,80 +1,21 @@
|
|
| 1 |
-
# ai_model.py
|
| 2 |
-
|
| 3 |
-
import google.generativeai as genai
|
| 4 |
import os
|
| 5 |
-
import
|
| 6 |
-
from config import (
|
| 7 |
-
SYSTEM_INSTRUCTION,
|
| 8 |
-
MODEL_NAME,
|
| 9 |
-
DEFAULT_MAX_OUTPUT_TOKENS,
|
| 10 |
-
DEFAULT_TEMPERATURE,
|
| 11 |
-
DEFAULT_TOP_P,
|
| 12 |
-
DEFAULT_TOP_K,
|
| 13 |
-
DEFAULT_STOP_SEQUENCES,
|
| 14 |
-
)
|
| 15 |
import asyncio
|
| 16 |
-
from typing import List, Optional # <-- Added this import
|
| 17 |
|
| 18 |
-
#
|
| 19 |
-
logging.basicConfig(level=logging.ERROR)
|
| 20 |
-
|
| 21 |
-
# Load Google API key from environment
|
| 22 |
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
|
| 23 |
if not GOOGLE_API_KEY:
|
| 24 |
-
raise ValueError("
|
| 25 |
|
|
|
|
| 26 |
genai.configure(api_key=GOOGLE_API_KEY)
|
|
|
|
| 27 |
|
| 28 |
-
#
|
| 29 |
-
|
| 30 |
-
model_name=MODEL_NAME,
|
| 31 |
-
system_instruction=SYSTEM_INSTRUCTION
|
| 32 |
-
)
|
| 33 |
-
|
| 34 |
-
# Query AI model
|
| 35 |
-
async def query_ai_model(
|
| 36 |
-
message: str,
|
| 37 |
-
history: Optional[List[dict]] = None,
|
| 38 |
-
max_output_tokens: int = DEFAULT_MAX_OUTPUT_TOKENS,
|
| 39 |
-
temperature: float = DEFAULT_TEMPERATURE,
|
| 40 |
-
top_p: float = DEFAULT_TOP_P,
|
| 41 |
-
top_k: int = DEFAULT_TOP_K,
|
| 42 |
-
stop_sequences: Optional[List[str]] = DEFAULT_STOP_SEQUENCES,
|
| 43 |
-
):
|
| 44 |
try:
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
messages.append({'role': 'user', 'content': message})
|
| 48 |
-
|
| 49 |
-
# Set generation configuration
|
| 50 |
-
generation_config = genai.types.GenerationConfig(
|
| 51 |
-
temperature=temperature,
|
| 52 |
-
top_p=top_p,
|
| 53 |
-
top_k=top_k,
|
| 54 |
-
max_output_tokens=max_output_tokens,
|
| 55 |
-
stop_sequences=stop_sequences,
|
| 56 |
-
)
|
| 57 |
-
|
| 58 |
-
# Generate response
|
| 59 |
-
response = model.generate_content(
|
| 60 |
-
messages=messages,
|
| 61 |
-
generation_config=generation_config
|
| 62 |
-
)
|
| 63 |
-
|
| 64 |
-
# Extract the assistant's reply
|
| 65 |
-
assistant_reply = {'role': 'assistant', 'content': response.text}
|
| 66 |
-
|
| 67 |
-
return assistant_reply
|
| 68 |
except Exception as e:
|
| 69 |
-
|
| 70 |
-
return
|
| 71 |
-
|
| 72 |
-
# Preprocess chat history to the required format
def preprocess_chat_history(history: List[tuple]) -> List[dict]:
    """Flatten (user, model) message pairs into a role-tagged message list.

    Each tuple contributes up to two entries, in order: the user turn
    (role 'user') and the model turn (role 'assistant'). A ``None`` entry
    on either side of a pair is skipped.
    """
    converted = []
    for user_text, model_text in history:
        converted.extend(
            {'role': role, 'content': text}
            for role, text in (('user', user_text), ('assistant', model_text))
            if text is not None
        )
    return converted
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
# ai_model.py — module setup: credentials and the shared Gemini model handle.
import os
import asyncio

import google.generativeai as genai

# Load API key from environment variable; fail fast if it is missing so the
# problem surfaces at import time rather than on the first request.
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
if not GOOGLE_API_KEY:
    raise ValueError("Google API key not set in the environment variables.")

# Configure the Google Generative AI client and create the model handle
# used by query_ai_model below.
genai.configure(api_key=GOOGLE_API_KEY)
model = genai.GenerativeModel('gemini-pro')
|
| 13 |
|
| 14 |
+
# Function to query AI model
async def query_ai_model(prompt: str) -> str:
    """Send *prompt* to the shared Gemini model and return its text reply.

    Parameters:
        prompt: The user's message to forward to the model.

    Returns:
        The model's reply text, or a canned apology string if the API
        call raises for any reason (network error, safety block, etc.).
    """
    try:
        # model.generate_content is a blocking HTTP call; run it in a
        # worker thread so an async caller's event loop is not stalled.
        response = await asyncio.to_thread(model.generate_content, prompt)
        return response.text
    except Exception as e:
        # Best-effort fallback: report and degrade gracefully instead of
        # propagating API failures to the chat UI.
        print(f"Error querying AI model: {e}")
        return "Sorry, I couldn't process your request."
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|