kapil-vps / scripts /llm /runner.py
ayushsinghal1510's picture
Intial Commit
34df346
raw
history blame contribute delete
620 Bytes
async def run_gemini(
    gemini_client,
    contents,
    generation_config,
    model,
) -> str:
    """Stream a Gemini completion and return the concatenated response text.

    Args:
        gemini_client: Client object exposing ``models.generate_content_stream``
            (presumably a ``google-genai`` client — confirm against callers).
        contents: Prompt contents, forwarded verbatim to the API.
        generation_config: Generation settings, forwarded as ``config``.
        model: Identifier of the model to query.

    Returns:
        All streamed chunk texts joined into a single string.

    NOTE(review): the stream is consumed with a plain synchronous ``for`` and
    nothing here is awaited, so this coroutine blocks the event loop while
    chunks arrive. If concurrency matters, the async streaming client should
    be used instead — confirm whether that was intentional.
    """
    parts: list[str] = []
    for chunk in gemini_client.models.generate_content_stream(
        model=model,
        contents=contents,
        config=generation_config,
    ):
        # Some chunks carry no text (e.g. metadata-only chunks); skip those.
        if chunk.text:
            parts.append(chunk.text)
    # join() avoids repeated string reallocation from += in the loop.
    return ''.join(parts)
async def run_groq(
    messages: list,
    groq_client,
    model: str = 'llama-3.3-70b-versatile',
) -> str:
    """Run a Groq chat completion and return the first choice's content.

    Args:
        messages: Chat messages in the API's expected format, forwarded
            verbatim (presumably ``[{"role": ..., "content": ...}, ...]`` —
            confirm against callers).
        groq_client: Client object exposing ``chat.completions.create``.
        model: Model identifier; defaults to the previously hard-coded
            ``'llama-3.3-70b-versatile'`` so existing callers are unaffected.

    Returns:
        The ``content`` string of the first completion choice.

    NOTE(review): ``create`` is called synchronously inside an ``async def``,
    so this blocks the event loop for the duration of the request — confirm
    whether the async Groq client should be used instead.
    """
    chat_completion = groq_client.chat.completions.create(
        messages=messages,
        model=model,
    )
    return chat_completion.choices[0].message.content