abhlash committed
Commit · 49447a7 · 1 Parent(s): fede037
updated the app and requirements
Files changed:
- app.py: +111 -213
- requirements.txt: +1 -3
app.py CHANGED

@@ -1,11 +1,9 @@
-import gradio as gr
-from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaConfig
 import os
-
+import gradio as gr
 import logging
 import sys
-
-import …  # truncated in the scrape; the removed code below also uses torch, login/HfApi (huggingface_hub), and load_dotenv (dotenv)
+import requests
+from dotenv import load_dotenv

 # Load environment variables
 load_dotenv()

@@ -13,227 +11,127 @@ load_dotenv()
 # Setup logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', stream=sys.stdout)

-# …
-
-model_name = "meta-llama/Llama-3.1-8B"

-# (the following removed lines are blank in the scrape; reconstructed from the branches below)
-hf_token = os.getenv("HUGGINGFACE_TOKEN")
-if hf_token:
-    try:
-        login(token=hf_token)
-        api = HfApi()
-        api.whoami()
-        logging.info("Successfully logged in to Hugging Face")
-    except Exception as e:
-        logging.error(f"Error authenticating with Hugging Face: {str(e)}")
-        logging.warning("Proceeding without authentication. This may limit access to certain models.")
-else:
-    logging.warning("HUGGINGFACE_TOKEN not found in environment variables. Proceeding without authentication.")
-
-# Load the model and tokenizer
-try:
-    logging.info(f"Attempting to load tokenizer for {model_name}")
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    logging.info("Tokenizer loaded successfully")
-
-    logging.info(f"Attempting to load model {model_name}")
-    model = AutoModelForCausalLM.from_pretrained(model_name)
-    logging.info("Model loaded successfully")
-
-    if tokenizer.pad_token is None:
-        tokenizer.pad_token = tokenizer.eos_token
-        model.config.pad_token_id = model.config.eos_token_id
-
-    logging.info(f"Successfully loaded {model_name}")
-except Exception as e:
-    logging.error(f"Error loading {model_name}: {str(e)}")
-    raise
-
-MAX_TOTAL_TOKENS = 2048  # Adjusted to Llama model's token limit
-MAX_INPUT_TOKENS = 1600  # 1600 tokens for input, leaving room for generated output
-CONTEXT_RATIO = 0.6  # Adjusted for summarization
-
-def truncate_to_token_limit(text, max_tokens):
-    tokens = tokenizer.encode(text)
-    if len(tokens) > max_tokens:
-        tokens = tokens[:max_tokens]
-    return tokenizer.decode(tokens, skip_special_tokens=True)
-
-def summarize_text(text, max_tokens):
-    if len(tokenizer.encode(text)) <= max_tokens:
-        return text, None
-
-    summarization_prompt = f"""
-    Summarize the following text concisely, preserving the key points:
-
-    {text}
-
-    Ensure the summary is under {max_tokens} tokens.
-    """
-
-    try:
-        inputs = tokenizer(summarization_prompt, return_tensors="pt", padding=True, truncation=True, max_length=int(MAX_INPUT_TOKENS * CONTEXT_RATIO))
-
-        device = "cuda" if torch.cuda.is_available() else "cpu"
-        model.to(device)
-        inputs = {k: v.to(device) for k, v in inputs.items()}
-
-        with torch.no_grad():
-            summary_outputs = model.generate(
-                **inputs,
-                max_new_tokens=max_tokens,
-                num_return_sequences=1,
-                temperature=0.7,
-                top_k=50,
-                top_p=0.95,
-                do_sample=True
-            )
-
-        summary = tokenizer.decode(summary_outputs[0], skip_special_tokens=True)
-        summary = summary.replace(summarization_prompt, "").strip()
-
-        warning = "Input was summarized to fit the token limit. Some details may be omitted."
-        return summary, warning
-    except Exception as e:
-        logging.error(f"Error during summarization: {str(e)}")
-        return text[:max_tokens] + "...", "Error in summarization. Text was truncated."
-
-def generate_prompt(recipient_name, recipient_role, industry, details):
-    details, warning = summarize_text(details, MAX_INPUT_TOKENS // 2)
-
-    prompt_generation_input = f"""
-    Create a detailed prompt for writing a professional email based on the following information:
-    - Recipient: {recipient_name}, a {recipient_role} in the {industry} industry
-    - Purpose: {details}
-
-    Include:
-    1. Greeting
-    2. Main email points
-    3. Suggested closing
-    4. Tone (e.g., formal, friendly)
-    5. Industry-relevant phrases or terms
-    """
-
-    prompt_generation_input = truncate_to_token_limit(prompt_generation_input, MAX_INPUT_TOKENS)
-
-    try:
-        inputs = tokenizer(prompt_generation_input, return_tensors="pt", padding=True, truncation=True, max_length=MAX_INPUT_TOKENS)
-
-        device = "cuda" if torch.cuda.is_available() else "cpu"
-        model.to(device)
-        inputs = {k: v.to(device) for k, v in inputs.items()}
-
-        with torch.no_grad():
-            prompt_outputs = model.generate(
-                **inputs,
-                max_new_tokens=200,
-                num_return_sequences=1,
-                temperature=0.7,
-                top_k=50,
-                top_p=0.95,
-                do_sample=True
-            )
-
-        generated_prompt = tokenizer.decode(prompt_outputs[0], skip_special_tokens=True)
-        return generated_prompt.replace(prompt_generation_input, "").strip(), warning

-    …  # the matching except clause for the try above is lost in the scrape
-
-def generate_email_body(prompt):
-    # Concise prompt without instruction language
-    email_generation_input = f"""
-    {prompt}
-    """
-
-    # Limit input to token constraints
-    email_generation_input = truncate_to_token_limit(email_generation_input, MAX_INPUT_TOKENS)
-
+# Retrieve the Groq API key from environment variables
+groq_api_key = os.getenv("GROQ_TOKEN")
+
+# Define the model name for Groq API
+model_name = "llama-3.1-8b-instant"
+
+# Define maximum tokens allowed for context within the prompt
+MAX_CONTEXT_TOKENS = 750
+
+def complete_task(val, instruction, token_limit, temperature=0.2):
     try:
-        …  # the input tokenization and the model.generate call that produced email_outputs are lost in the scrape; only the tail survives:
-            do_sample=False  # Deterministic output
-        )
-
-        # Decode and return only the email body
-        email_body = tokenizer.decode(email_outputs[0], skip_special_tokens=True).strip()
-
-        return email_body
-
+        prompt = f"{val}\n{instruction}"
+
+        headers = {
+            "Authorization": f"Bearer {groq_api_key}",
+            "Content-Type": "application/json"
+        }
+
+        payload = {
+            "messages": [{"role": "user", "content": prompt}],
+            "model": model_name,
+            "max_tokens": token_limit,
+            "temperature": temperature
+        }
+
+        response = requests.post("https://api.groq.com/openai/v1/chat/completions", json=payload, headers=headers)
+        response_data = response.json()
+
+        if response.status_code == 200 and "choices" in response_data:
+            answer = response_data["choices"][0]["message"]["content"]
+            logging.info(f"Model output:\n{answer}")
+            return answer.strip()  # Take the full response without truncation
+        else:
+            error_message = response_data.get("error", "No response generated.")
+            logging.warning(f"Model did not generate usable content: {error_message}")
+            return "No response generated."
     except Exception as e:
-        …  # the removed error handling is lost in the scrape
+        error_message = f"An error occurred: {str(e)}"
+        logging.error(error_message)
+        return error_message
+
+def generate_subject(context):
+    subject_prompt = "Provide only one concise subject line for an email about the following context."
+    subject_line = complete_task(context, subject_prompt, token_limit=25, temperature=0.5)
+    if not subject_line or len(subject_line.split()) > 10:
+        logging.warning("Generated subject line is too long or empty. Using default subject.")
+        return "Request for Promotion Based on Project Achievements"
+    return subject_line
+
+def generate_greeting(name):
+    greeting_prompt = f"Provide a single, formal greeting for an email addressed to {name}. No alternatives."
+    greeting = complete_task(name, greeting_prompt, token_limit=20, temperature=0.3)
+    return greeting if greeting else f"Dear {name},"
+
+def generate_body(context, tone):
+    body_prompt = f"Write a brief and direct {tone} email body based on the following context: {context}"
+    body = complete_task(context, body_prompt, token_limit=500, temperature=0.7)
+    return body if body else "I am reaching out regarding my recent achievements and contributions to request a promotion."
+
+def generate_closing():
+    closing_prompt = "Provide a single professional closing statement for an email. No alternatives."
+    closing = complete_task("", closing_prompt, token_limit=30, temperature=0.3)
+    return closing if closing else "Best regards,\n[Your Name]"
+
+def refine_email(full_email):
+    cohesion_prompt = "Ensure the following email is clear, professional, and follows standard email format. Remove any extra instructions or notes."
+    refined_email = complete_task(full_email, cohesion_prompt, token_limit=500, temperature=0.5)
+    return refined_email if refined_email else full_email
+
+def generate_email(name, recipient_email, industry, recipient_role, context, tone_dropdown, custom_tone):
+    logging.info("Starting section-wise email generation process with Groq API")
+
+    tone = custom_tone.strip() if custom_tone else tone_dropdown or "formal"
+    logging.info(f"Selected tone: {tone}")
+
+    # Generate each section with a fallback if empty
+    subject = generate_subject(context)
+    if not subject or subject.strip() == "":
+        subject = "Request for Promotion Discussion"
+
+    greeting = generate_greeting(name)
+    if not greeting or greeting.strip() == "":
+        greeting = f"Dear {name},"
+
+    body = generate_body(context, tone)
+    if not body or body.strip() == "":
+        body = "I am writing to discuss my recent achievements and contributions in hopes of discussing a promotion."
+
+    closing = generate_closing()
+    if not closing or closing.strip() == "":
+        closing = "Best regards,\n[Your Name]"
+
+    # Combine sections into a full email
+    full_email = f"Subject: {subject}\n\n{greeting}\n\n{body}\n\n{closing}"
+    logging.info(f"Constructed email before refinement:\n{full_email}")
+
+    # Attempt final refinement
+    refined_email = refine_email(full_email)
+    if "No response generated" in refined_email or len(refined_email.strip()) == 0:
+        logging.warning("Refinement failed to produce complete content. Returning unrefined email.")
+        return full_email
+
+    return refined_email
+
+# Gradio Interface
 iface = gr.Interface(
     fn=generate_email,
     inputs=[
         gr.Textbox(lines=1, label="Recipient Name"),
         gr.Textbox(lines=1, label="Recipient Email"),
-        gr.Textbox(lines=1, label="Industry…
-        gr.Textbox(lines=1, label="Recipient Role…
-        gr.Textbox(lines=5, label="…
+        gr.Textbox(lines=1, label="Industry"),
+        gr.Textbox(lines=1, label="Recipient Role"),
+        gr.Textbox(lines=5, label="Context of Email"),
+        gr.Dropdown(choices=["formal", "friendly", "persuasive"], label="Tone", value="formal"),
+        gr.Textbox(lines=1, label="Custom Tone (optional)")
     ],
-    outputs=gr.Textbox(lines=10, label="Generated Email…
-    title="…
-    description="…
+    outputs=gr.Textbox(lines=10, label="Generated Email"),
+    title="AI Email Generator",
+    description="Generate personalized emails based on input details.",
 )

 if __name__ == '__main__':
+    logging.info("Launching Gradio interface")
     iface.launch()
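The heart of the rewrite is complete_task: local transformers/torch inference is swapped for a single HTTPS call to Groq's OpenAI-compatible chat-completions endpoint. The sketch below isolates that request/response cycle so it can be exercised outside the Space. It assumes GROQ_TOKEN is set in the environment and requests is installed; the helper name ask_groq, the timeout, and raise_for_status are illustrative additions, not part of the commit.

import os
import requests

# Endpoint, headers, and payload shape mirror complete_task in the commit.
GROQ_URL = "https://api.groq.com/openai/v1/chat/completions"

def ask_groq(prompt, max_tokens=100, temperature=0.2):
    headers = {
        "Authorization": f"Bearer {os.getenv('GROQ_TOKEN')}",
        "Content-Type": "application/json",
    }
    payload = {
        "model": "llama-3.1-8b-instant",
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": max_tokens,
        "temperature": temperature,
    }
    # timeout and raise_for_status are illustrative hardening; the committed
    # code posts without a timeout and branches on status_code instead.
    response = requests.post(GROQ_URL, json=payload, headers=headers, timeout=30)
    response.raise_for_status()
    return response.json()["choices"][0]["message"]["content"].strip()

if __name__ == "__main__":
    print(ask_groq("Provide only one concise subject line for an email about a project update."))

One practical consequence of this design: every section request pays a full network round trip, so a single generated email costs five sequential API calls (subject, greeting, body, closing, refinement).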
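Downstream of complete_task, generate_email builds the message section by section, substitutes a hardcoded default whenever a section comes back empty, then runs one refinement pass over the assembled text. Below is a minimal offline sketch of that assembly-with-fallback logic, with complete_task stubbed by canned replies; the stub, its keying on token_limit, and the sample inputs are assumptions for illustration only.

# Offline sketch of the section-wise assembly in generate_email above.
# The stub keys on token_limit because each section in the app requests
# a distinct budget (subject 25, greeting 20, closing 30, body/refine 500).
def complete_task(val, instruction, token_limit, temperature=0.2):
    canned = {
        25: "Quarterly Results and Next Steps",  # subject
        20: "Dear Dr. Rao,",                     # greeting
        30: "Best regards,\n[Your Name]",        # closing
    }
    return canned.get(token_limit, "I am writing to share our quarterly results and to propose next steps.")

def assemble_email(name, context, tone="formal"):
    # Mirror the app's pattern: generate, then fall back if the call returned nothing.
    subject = complete_task(context, "one concise subject line", token_limit=25, temperature=0.5) or "Request for Discussion"
    greeting = complete_task(name, "a single formal greeting", token_limit=20, temperature=0.3) or f"Dear {name},"
    body = complete_task(context, f"a brief {tone} email body", token_limit=500, temperature=0.7) or "I am writing to follow up."
    closing = complete_task("", "a single professional closing", token_limit=30, temperature=0.3) or "Best regards,\n[Your Name]"
    return f"Subject: {subject}\n\n{greeting}\n\n{body}\n\n{closing}"

print(assemble_email("Dr. Rao", "summary of quarterly results"))

In the committed app the fallbacks are doubled: each generate_* helper carries its own default, and generate_email checks the result again before assembling, so an empty API response can never produce an empty section.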
requirements.txt CHANGED

@@ -1,5 +1,3 @@
 gradio==3.50.2
-…  # this removed pin is lost in the scrape (plausibly a transformers pin, since the old app.py imports transformers)
-torch==2.1.2
-huggingface_hub==0.20.2
+requests==2.31.0
 python-dotenv==1.0.0
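With torch and huggingface_hub dropped, the Space now needs only three pure-Python packages, which should make cold starts much lighter. Below is a small pre-flight check for a local run, assuming a .env file next to app.py provides GROQ_TOKEN; the check script itself is a convenience assumption, not part of the commit.

# Pre-flight check before `python app.py`: verifies the trimmed dependency
# set (gradio, requests, python-dotenv) and the GROQ_TOKEN that the new
# app.py reads at import time.
import importlib
import os

from dotenv import load_dotenv

load_dotenv()  # picks up .env from the working directory, as app.py does

for module in ("gradio", "requests", "dotenv"):
    importlib.import_module(module)  # raises ImportError if a package is missing

if not os.getenv("GROQ_TOKEN"):
    raise SystemExit("GROQ_TOKEN is unset; complete_task would fail every call and the app would fall back to its default text")

print("Environment OK: run `python app.py` to launch the Gradio interface")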