# Email_chatbot / llm_processing.py
# Author: abhishekjoel
# Last change: "Update llm_processing.py" (commit 910161f, verified)
def generate_email(about_text, recipient_name, project_name, key_features, email_style):
    """Generate an outreach email with the module-level `model`/`tokenizer`.

    Args:
        about_text: Profile/background text to ground the email on.
        recipient_name: Name used to address the recipient.
        project_name: Project to pitch in the email body.
        key_features: Iterable of feature strings, or a single string.
        email_style: One of "Casual", "Professional", "Cold"; any other
            value falls back to a generic instruction.

    Returns:
        The generated email text, or a string of the form "Error: ..."
        if generation fails (best-effort contract preserved for callers).
    """
    # Map each supported style to its instruction sentence.
    style_prompts = {
        "Casual": "Compose a friendly and casual email.",
        "Professional": "Draft a formal and professional email.",
        "Cold": "Write a concise and attention-grabbing cold email."
    }
    # BUGFIX: fallback previously lacked a period, yielding the garbled
    # prompt "Compose an email Addressing ...".
    style_intro = style_prompts.get(email_style, "Compose an email.")

    # Accept either a single feature string or an iterable of features;
    # joining a bare string would otherwise splice every character.
    if isinstance(key_features, str):
        features_text = key_features
    else:
        features_text = ', '.join(key_features)

    # Construct the prompt for the LLM
    prompt = (
        f"{style_intro} Addressing {recipient_name}, based on this profile: {about_text}. "
        f"Discuss {project_name} with its key features: {features_text}."
    )
    try:
        # Tokenize the prompt
        inputs = tokenizer(prompt, return_tensors='pt')
        # Generate the response; inference only, so disable autograd.
        with torch.no_grad():
            outputs = model.generate(
                inputs['input_ids'],
                # Pass the attention mask so padded positions are ignored
                # (omitting it triggers a transformers warning and can
                # degrade generation with padded inputs).
                attention_mask=inputs.get('attention_mask'),
                max_length=500,       # Total length cap (prompt + completion)
                num_beams=5,          # Beam search for higher-quality output
                early_stopping=True   # Stop beams once all finish
            )
        # Decode the generated text, dropping special tokens (e.g. EOS).
        response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return response_text
    except Exception as e:
        # Deliberate best-effort boundary: callers receive an error string
        # rather than an exception (existing interface contract).
        return f"Error: {str(e)}"