import os
import google.generativeai as genai
# Configure the Gemini client from the environment.
# Raises KeyError at import time if GEMINI_API_KEY is unset — fail fast.
api_key = os.environ["GEMINI_API_KEY"]
genai.configure(api_key=api_key)
# Sampling parameters shared by every Gemini request in this module.
generation_config = dict(
    temperature=0.5,        # moderate randomness
    top_p=0.7,              # nucleus-sampling probability cutoff
    top_k=40,               # restrict sampling to the 40 most likely tokens
    max_output_tokens=8192, # cap on response length
)
# Function to query the Gemini API
def query_gemini(user_input, model_config, session_history):
    """Stream a Gemini chat completion as text chunks.

    Args:
        user_input: Latest user message. NOTE(review): currently unused —
            presumably session_history already contains it; confirm with
            the caller before wiring it in.
        model_config: Extra keyword arguments forwarded to GenerativeModel
            (e.g. safety_settings); merged after the module-level
            generation_config.
        session_history: Alternating turns, each a dict with a "content"
            key; even indices are user turns, odd indices are model turns.

    Yields:
        str: Text of each streamed chunk, or a single "Error: ..." string
        if the request raises.
    """
    model = genai.GenerativeModel(
        model_name="gemini-1.5-flash",
        generation_config=generation_config,
        **model_config,
    )
    # Convert session history into the Gemini API's role/parts format.
    history = [
        {"role": "user" if i % 2 == 0 else "model", "parts": [msg["content"]]}
        for i, msg in enumerate(session_history)
    ]
    try:
        response = model.generate_content(history, stream=True)
        for chunk in response:
            yield chunk.text
    except Exception as e:
        # BUG FIX: the original used `return f"Error: ..."` — inside a
        # generator that value is swallowed into StopIteration.value, so
        # callers iterating the stream never saw the error. Yield it so
        # consumers receive the message through the same channel.
        yield f"Error: {str(e)}"