Spaces:
Sleeping
Sleeping
File size: 5,728 Bytes
9457549 49447a7 c5331aa fede037 49447a7 9457549 fede037 c5331aa 30af1b3 49447a7 924bd16 49447a7 fede037 49447a7 fede037 49447a7 fede037 49447a7 fede037 49447a7 fede037 49447a7 30af1b3 c5331aa 49447a7 c5331aa 49447a7 30af1b3 c5331aa 49447a7 c5331aa |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 |
import os
import gradio as gr
import logging
import sys
import requests
from dotenv import load_dotenv
# Load environment variables from a local .env file (if present) so the
# Groq API key can live outside of source control.
load_dotenv()
# Log INFO and above to stdout with timestamps so hosted logs capture activity.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', stream=sys.stdout)
# Retrieve the Groq API key from environment variables.
# NOTE(review): if GROQ_TOKEN is unset this is None and API calls will fail with 401.
groq_api_key = os.getenv("GROQ_TOKEN")
# Model served by Groq's OpenAI-compatible chat-completions endpoint.
model_name = "llama-3.1-8b-instant"
# Define maximum tokens allowed for context within the prompt.
# NOTE(review): constant is defined but not referenced anywhere in this file — confirm intent.
MAX_CONTEXT_TOKENS = 750
def complete_task(val, instruction, token_limit, temperature=0.2):
    """Send a single-turn chat-completion request to the Groq API.

    Args:
        val: Context text placed first in the prompt (may be empty).
        instruction: Instruction appended after the context on a new line.
        token_limit: Maximum number of tokens the model may generate.
        temperature: Sampling temperature (default 0.2 for near-determinism).

    Returns:
        The stripped model output on success, the literal string
        "No response generated." when the API returns no usable choices,
        or an "An error occurred: ..." description if the request fails.
    """
    try:
        prompt = f"{val}\n{instruction}"
        headers = {
            "Authorization": f"Bearer {groq_api_key}",
            "Content-Type": "application/json"
        }
        payload = {
            "messages": [{"role": "user", "content": prompt}],
            "model": model_name,
            "max_tokens": token_limit,
            "temperature": temperature
        }
        # Bug fix: the original call had no timeout, so a stalled connection
        # would hang the Gradio worker indefinitely. A timeout raises
        # requests.Timeout, which the except below converts to an error string.
        response = requests.post(
            "https://api.groq.com/openai/v1/chat/completions",
            json=payload,
            headers=headers,
            timeout=30,
        )
        response_data = response.json()
        if response.status_code == 200 and "choices" in response_data:
            answer = response_data["choices"][0]["message"]["content"]
            logging.info(f"Model output:\n{answer}")
            return answer.strip()  # Take the full response without truncation
        # Groq errors arrive as {"error": {"message": ...}}; log the
        # human-readable message instead of the raw dict when possible.
        error_info = response_data.get("error", "No response generated.")
        if isinstance(error_info, dict):
            error_info = error_info.get("message", error_info)
        logging.warning(f"Model did not generate usable content: {error_info}")
        return "No response generated."
    except Exception as e:
        # Network failures, timeouts, and JSON decode errors all land here;
        # return the description so the UI shows something actionable.
        error_message = f"An error occurred: {str(e)}"
        logging.error(error_message)
        return error_message
def generate_subject(context):
    """Ask the model for one concise subject line, falling back to a default.

    A candidate is rejected when it is empty or longer than ten words.
    """
    instruction = "Provide only one concise subject line for an email about the following context."
    candidate = complete_task(context, instruction, token_limit=25, temperature=0.5)
    # Guard clause: unusable candidate -> log and return the safe default.
    if not candidate or len(candidate.split()) > 10:
        logging.warning("Generated subject line is too long or empty. Using default subject.")
        return "Request for Promotion Based on Project Achievements"
    return candidate
def generate_greeting(name):
    """Generate a single formal greeting addressed to *name*, with a fallback."""
    instruction = f"Provide a single, formal greeting for an email addressed to {name}. No alternatives."
    result = complete_task(name, instruction, token_limit=20, temperature=0.3)
    if result:
        return result
    # Model produced nothing usable -> deterministic fallback greeting.
    return f"Dear {name},"
def generate_body(context, tone):
    """Produce the email body in the requested tone, with a generic fallback."""
    instruction = f"Write a brief and direct {tone} email body based on the following context: {context}"
    result = complete_task(context, instruction, token_limit=500, temperature=0.7)
    if result:
        return result
    # Model produced nothing usable -> static promotion-request body.
    return "I am reaching out regarding my recent achievements and contributions to request a promotion."
def generate_closing():
    """Return a single professional closing line, with a static fallback."""
    instruction = "Provide a single professional closing statement for an email. No alternatives."
    result = complete_task("", instruction, token_limit=30, temperature=0.3)
    return result or "Best regards,\n[Your Name]"
def refine_email(full_email):
    """Run one polishing pass over the assembled email; keep the draft on failure."""
    instruction = "Ensure the following email is clear, professional, and follows standard email format. Remove any extra instructions or notes."
    polished = complete_task(full_email, instruction, token_limit=500, temperature=0.5)
    return polished or full_email
def generate_email(name, recipient_email, industry, recipient_role, context, tone_dropdown, custom_tone):
    """Assemble a full email section by section via the Groq API.

    Args:
        name: Recipient name used in the greeting.
        recipient_email: Recipient address (collected by the UI; not used here).
        industry: Recipient industry (collected by the UI; not used here).
        recipient_role: Recipient role (collected by the UI; not used here).
        context: Free-text description of what the email should say.
        tone_dropdown: Tone selected from the dropdown ("formal" etc.).
        custom_tone: Optional free-text tone that overrides the dropdown.

    Returns:
        The refined email text, or the unrefined draft when refinement fails.
    """
    logging.info("Starting section-wise email generation process with Groq API")
    # Bug fix: a whitespace-only custom tone previously yielded an empty tone
    # string (" ".strip() -> ""). Strip first, then fall back to the dropdown
    # value, then to "formal".
    tone = (custom_tone or "").strip() or tone_dropdown or "formal"
    logging.info(f"Selected tone: {tone}")
    # Generate each section with a hard-coded fallback if the model returns
    # nothing usable (None, empty, or whitespace-only).
    subject = generate_subject(context)
    if not subject or not subject.strip():
        subject = "Request for Promotion Discussion"
    greeting = generate_greeting(name)
    if not greeting or not greeting.strip():
        greeting = f"Dear {name},"
    body = generate_body(context, tone)
    if not body or not body.strip():
        body = "I am writing to discuss my recent achievements and contributions in hopes of discussing a promotion."
    closing = generate_closing()
    if not closing or not closing.strip():
        closing = "Best regards,\n[Your Name]"
    # Combine sections into a full email
    full_email = f"Subject: {subject}\n\n{greeting}\n\n{body}\n\n{closing}"
    logging.info(f"Constructed email before refinement:\n{full_email}")
    # Attempt final refinement; keep the unrefined draft if it fails.
    refined_email = refine_email(full_email)
    if "No response generated" in refined_email or len(refined_email.strip()) == 0:
        logging.warning("Refinement failed to produce complete content. Returning unrefined email.")
        return full_email
    return refined_email
# Gradio Interface: maps seven text/dropdown inputs onto generate_email's
# positional parameters (order must match the function signature) and shows
# the returned email in a single output textbox.
iface = gr.Interface(
    fn=generate_email,
    inputs=[
        gr.Textbox(lines=1, label="Recipient Name"),
        gr.Textbox(lines=1, label="Recipient Email"),
        gr.Textbox(lines=1, label="Industry"),
        gr.Textbox(lines=1, label="Recipient Role"),
        gr.Textbox(lines=5, label="Context of Email"),
        gr.Dropdown(choices=["formal", "friendly", "persuasive"], label="Tone", value="formal"),
        gr.Textbox(lines=1, label="Custom Tone (optional)")
    ],
    outputs=gr.Textbox(lines=10, label="Generated Email"),
    title="AI Email Generator",
    description="Generate personalized emails based on input details.",
)
# Entry point: start the Gradio web server only when run as a script,
# not when imported as a module.
if __name__ == '__main__':
    logging.info("Launching Gradio interface")
    iface.launch()
|