Spaces:
Sleeping
Sleeping
File size: 1,453 Bytes
import os
import httpx
# Gemini API key; the placeholder default keeps the module importable
# without the env var set (requests will then fail with an HTTP 4xx).
API_KEY = os.environ.get("GEMINI_API_KEY", "your-api-key-here")
# Model to query; override via the MODEL_NAME environment variable.
MODEL_NAME = os.environ.get("MODEL_NAME", "gemini-2.0-flash")
# Root of the Generative Language REST API (v1beta surface).
BASE_URL = "https://generativelanguage.googleapis.com/v1beta"
def chat(messages: list[dict], **kwargs) -> str:
    """Send a conversation to the Gemini generateContent endpoint.

    Args:
        messages: OpenAI-style dicts with "role" and "content" keys.
            "user" maps to Gemini role "user"; "system" messages are
            sent via Gemini's systemInstruction field; any other role
            (e.g. "assistant") is sent as "model".
        **kwargs: Optional generation settings — temperature,
            max_tokens, top_p, top_k.

    Returns:
        The text of the first candidate in the response.

    Raises:
        RuntimeError: on a non-200 HTTP status, or when the response
            contains no usable candidate text (e.g. safety-blocked).
    """
    headers = {
        "Content-Type": "application/json"
    }
    # Gemini only accepts "user"/"model" roles inside `contents`;
    # system prompts belong in the dedicated systemInstruction field.
    contents = []
    system_parts = []
    for msg in messages:
        if msg["role"] == "system":
            system_parts.append({"text": msg["content"]})
            continue
        role = "user" if msg["role"] == "user" else "model"
        contents.append({
            "role": role,
            "parts": [{"text": msg["content"]}]
        })
    payload = {
        "contents": contents,
        "generationConfig": {
            "temperature": kwargs.get("temperature", 1.0),
            "maxOutputTokens": kwargs.get("max_tokens", 2048),
            "topP": kwargs.get("top_p", 0.95),
            "topK": kwargs.get("top_k", 40)
        }
    }
    if system_parts:
        payload["systemInstruction"] = {"parts": system_parts}
    url = f"{BASE_URL}/models/{MODEL_NAME}:generateContent?key={API_KEY}"
    response = httpx.post(
        url,
        json=payload,
        headers=headers,
        timeout=60.0
    )
    if response.status_code != 200:
        raise RuntimeError(f"Error: {response.status_code} - {response.text}")
    result = response.json()
    # A safety-blocked or empty response lacks candidates/parts; fail
    # with a clear message rather than an opaque KeyError/IndexError.
    try:
        return result["candidates"][0]["content"]["parts"][0]["text"]
    except (KeyError, IndexError) as err:
        raise RuntimeError(f"Unexpected response shape: {result}") from err
if __name__ == "__main__":
    # Smoke test: ask the model to identify itself and echo the reply.
    demo_messages = [
        {"role": "user", "content": "Hello! What model are you?"},
    ]
    reply = chat(demo_messages)
    print(reply)