File size: 2,644 Bytes
4d73b93
 
 
 
d27e157
4d73b93
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
import streamlit as st
from openai import OpenAI
import os

# Read the API key from the environment — never hard-code secrets in source.
API_KEY = os.getenv("LLAMA_API_KEY")

# Fail fast with a clear message instead of an opaque auth error at request time.
if not API_KEY:
    st.error("LLAMA_API_KEY environment variable is not set.")
    st.stop()

# OpenAI-compatible client pointed at NVIDIA's hosted inference endpoint.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=API_KEY,
)

# Streamlit App Layout
# ---- Page chrome ----
st.title("Personalized Email Drafting Tool")
st.subheader("Generate professional emails effortlessly!")

# ---- Sidebar controls (tone + creativity) ----
st.sidebar.header("Email Preferences")
TONE_OPTIONS = ["Professional", "Formal", "Casual", "Persuasive", "Friendly", "Angry"]
email_tone = st.sidebar.selectbox("Select the tone of your email:", TONE_OPTIONS)

# ---- Main input area ----
email_topic = st.text_input(
    "Topic of the email",
    placeholder="e.g., Meeting reschedule",
)
email_points = st.text_area(
    "Key points to include",
    placeholder="e.g., Highlight urgency, request a response by Friday",
)

# Temperature controls sampling randomness: 0.0 = deterministic, 1.0 = most creative.
temperature = st.sidebar.slider("Set creativity level (temperature):", 0.0, 1.0, 0.5)

# Generate email button
# Generate email button: on click, validate inputs, call the model with
# streaming enabled, and render tokens into a placeholder as they arrive.
if st.button("Generate Email"):
    if email_topic and email_points:
        with st.spinner("Generating your email..."):
            # System message fixes the tone; user message carries the content brief.
            messages = [
                {"role": "system", "content": f"Write an email with a {email_tone.lower()} tone."},
                {"role": "user", "content": f"Topic: {email_topic}\nKey Points: {email_points}"}
            ]
            try:
                completion = client.chat.completions.create(
                    model="meta/llama-3.1-405b-instruct",
                    messages=messages,
                    temperature=temperature,
                    top_p=0.7,
                    max_tokens=512,
                    stream=True
                )

                # Single placeholder updated in place for real-time streaming output.
                email_placeholder = st.empty()
                email_output = ""  # Accumulates the streamed content

                for chunk in completion:
                    delta = chunk.choices[0].delta.content
                    if delta is not None:
                        email_output += delta
                        email_placeholder.text(email_output)

                # Persist the result in session state so it survives reruns/clicks.
                if email_output:
                    st.session_state.generated_email = email_output
            except Exception as exc:
                # Top-level UI boundary: surface API/network failures to the user
                # instead of crashing the Streamlit run with a raw traceback.
                st.error(f"Email generation failed: {exc}")

    else:
        st.warning("Please fill in both the topic and key points to generate an email.")

# If generated email exists in session state, display it
# Show the most recent draft; session state persists it across Streamlit reruns.
if "generated_email" in st.session_state:
    st.text_area(
        "Generated Email",
        st.session_state.generated_email,
        height=300,
    )