File size: 2,423 Bytes
98773bf
 
e66df6a
 
98773bf
e66df6a
 
98773bf
 
e66df6a
 
 
98773bf
ff607d3
 
 
 
 
 
 
e66df6a
 
 
 
98773bf
e66df6a
98773bf
 
 
e66df6a
98773bf
e66df6a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98773bf
 
e66df6a
98773bf
e66df6a
98773bf
 
e66df6a
98773bf
e66df6a
98773bf
e66df6a
 
 
 
 
98773bf
 
 
e66df6a
98773bf
e66df6a
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
import streamlit as st
from langchain.prompts import PromptTemplate
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import os

# Authenticate with Hugging Face
# Add your Hugging Face token as an environment variable in Spaces or directly in the code


# Module-level cache so the (multi-GB) model is loaded once per process
# instead of on every button click.
_GENERATOR = None


def _load_generator():
    """Load and cache the Llama-2 chat model as a text-generation pipeline.

    The repository is gated: a Hugging Face access token must be available
    (``huggingface-cli login`` or the HUGGING_FACE_HUB_TOKEN env var).

    Returns:
        A ``transformers`` text-generation pipeline, created on first call
        and reused afterwards.
    """
    global _GENERATOR
    if _GENERATOR is None:
        # NOTE(review): the transformers-compatible hub id is
        # "meta-llama/Llama-2-7b-chat-hf"; the original
        # "meta-llama/Llama-2-7b-chat" is the raw checkpoint repo without a
        # config.json, so AutoModel loading fails on it -- confirm.
        model_id = "meta-llama/Llama-2-7b-chat-hf"
        tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=True)
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            trust_remote_code=True,
            use_auth_token=True,
        )
        _GENERATOR = pipeline("text-generation", model=model, tokenizer=tokenizer)
    return _GENERATOR


# Function to get the response back
def getLLMResponse(form_input, email_sender, email_recipient, email_style):
    """Generate an email for the given topic, sender, recipient and style.

    Args:
        form_input: Topic/body description entered by the user.
        email_sender: Name of the sender to include in the prompt.
        email_recipient: Name of the recipient to include in the prompt.
        email_style: Writing style label (e.g. "Formal").

    Returns:
        The generated text from the model (includes the prompt echo, as
        text-generation pipelines prepend the input by default).
    """
    generator = _load_generator()

    # Template for building the PROMPT
    template = """
    Write an email with {style} style and includes topic: {email_topic}.\n\nSender: {sender}\nRecipient: {recipient}
    \n\nEmail Text:
    """

    # Creating the final PROMPT
    prompt = PromptTemplate(
        input_variables=["style", "email_topic", "sender", "recipient"],
        template=template,
    )

    # Generating the response using the pipeline
    response = generator(
        prompt.format(
            email_topic=form_input,
            sender=email_sender,
            recipient=email_recipient,
            style=email_style,
        ),
        # max_length counts the prompt tokens too, so a long topic could
        # leave no budget for output; max_new_tokens bounds only the reply.
        max_new_tokens=256,
        # temperature is silently ignored under the default greedy decoding;
        # sampling must be enabled for it to take effect.
        do_sample=True,
        temperature=0.7,
    )

    # Extract and return the generated text
    return response[0]["generated_text"]


# Streamlit application setup
# Streamlit re-runs this script top-to-bottom on every user interaction,
# so statement order here is also the on-page rendering order.
st.set_page_config(
    page_title="Generate Emails",
    page_icon="📧",
    layout="centered",
    initial_sidebar_state="collapsed",
)
st.header("Generate Emails 📧")

# Free-text topic for the email body; height is in pixels.
form_input = st.text_area("Enter the email topic", height=275)

# Creating columns for the UI - To receive inputs from user
# Relative widths 10:10:5 — two wide text inputs and a narrower selectbox.
col1, col2, col3 = st.columns([10, 10, 5])
with col1:
    email_sender = st.text_input("Sender Name")
with col2:
    email_recipient = st.text_input("Recipient Name")
with col3:
    email_style = st.selectbox(
        "Writing Style",
        ("Formal", "Appreciating", "Not Satisfied", "Neutral"),
        index=0,  # default to "Formal"
    )

submit = st.button("Generate")

# When 'Generate' button is clicked, execute the below code
# NOTE(review): inputs are not validated — an empty topic/sender/recipient
# is passed straight to the model.
if submit:
    response = getLLMResponse(form_input, email_sender, email_recipient, email_style)
    st.write(response)