Spaces:
Sleeping
Sleeping
# agents/reply_agent.py
import os

from dotenv import load_dotenv
from groq import AsyncGroq

# Load environment variables from a local .env file (expects GROQ_API_KEY).
load_dotenv()

# Module-level async Groq client, shared by all coroutines in this module.
client = AsyncGroq(api_key=os.getenv("GROQ_API_KEY"))
async def generate_smart_reply(text: str, sender: str, tone: str) -> str:
    """
    Generate a context-aware email reply using Groq's fast inference.

    Args:
        text: Body of the original email being replied to.
        sender: Name/address of the original email's sender.
        tone: Desired tone for the drafted reply (e.g. "formal", "friendly").

    Returns:
        The drafted reply text with surrounding whitespace stripped.

    Raises:
        Exception: Any error from the Groq API is logged and re-raised
            unchanged for the caller to handle.
    """
    prompt = f"""
You are a highly intelligent email assistant helping Ayoub draft a reply.
Original Email from {sender}:
\"\"\"
{text}
\"\"\"
Task: Draft a complete, ready-to-send reply.
Tone: {tone}
Strict Instructions:
- Go straight to the email body. Do not include conversational preambles like "Here is the draft:".
- Sign off the email as Ayoub.
- Ensure there are absolutely no placeholder brackets (like [Insert Name Here]).
- Keep it concise and professional.
"""
    try:
        response = await client.chat.completions.create(
            messages=[
                {"role": "system", "content": "You are an expert email communicator."},
                {"role": "user", "content": prompt},
            ],
            # Currently supported, fast Llama 3.1 model on Groq.
            model="llama-3.1-8b-instant",
            temperature=0.6,
            max_tokens=500,
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        print(f"⚠️ Groq API Error: {e}")
        # Bare `raise` re-raises the active exception with its original
        # traceback intact (idiomatic; `raise e` is redundant here).
        raise