PPRA-Copilot / model_config.py
Shami96's picture
Update model_config.py
450e661 verified
raw
history blame contribute delete
850 Bytes
import os

from groq import Groq
def get_model():
    """Build and return a text-generation function backed by the Groq chat API.

    Returns:
        Callable[[str], str]: ``generate(text)`` — sends *text* as a single
        user message to the ``llama3-8b-8192`` model and returns the model's
        reply, or an ``"Error generating response: ..."`` string on failure.

    Raises:
        ValueError: if the ``GROQ_API_KEY`` environment variable is not set.
    """
    # SECURITY: the original hard-coded a live API key in source. Credentials
    # must come from the environment; the leaked key should be revoked.
    api_key = os.environ.get("GROQ_API_KEY")
    if not api_key:
        raise ValueError(
            "GROQ_API_KEY environment variable is not set. "
            "Export your Groq API key before calling get_model()."
        )
    client = Groq(api_key=api_key)

    # Conservative input cap: ~6000 tokens at roughly 4 characters per token.
    max_chars = 24000

    def generate(text):
        """Return the model's reply to *text*, truncating oversized input."""
        try:
            if len(text) > max_chars:
                text = text[:max_chars] + "...[content truncated due to length]"
            response = client.chat.completions.create(
                model="llama3-8b-8192",
                messages=[{"role": "user", "content": text}],
                max_tokens=2000,  # cap output length
            )
            return response.choices[0].message.content
        except Exception as e:
            # Best-effort contract: surface failures as a string instead of
            # raising, so callers never see a network/API exception.
            return f"Error generating response: {str(e)}"

    return generate