# llm_clients.py — thin wrappers around the Groq (Llama) and Google Gemini chat APIs.
import os

from groq import Groq
import google.generativeai as genai

# Client setup: both API keys are read from the environment.
# NOTE(review): os.getenv returns None when a variable is unset — the SDKs
# will then fail at call time rather than here; confirm keys are exported.
groq_client = Groq(api_key=os.getenv("GROQ_API_KEY"))

genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
gemini_model = genai.GenerativeModel("gemini-1.5-flash")
def call_llama(prompt):
    """Send *prompt* to Llama 3.1 8B Instant via Groq and return the reply text."""
    messages = [{"role": "user", "content": prompt}]
    completion = groq_client.chat.completions.create(
        model="llama-3.1-8b-instant",
        messages=messages,
    )
    # First (and only) choice holds the assistant's message.
    return completion.choices[0].message.content
def call_gemini(prompt):
    """Send *prompt* to Gemini 1.5 Flash and return the generated text."""
    result = gemini_model.generate_content(prompt)
    return result.text