File size: 3,530 Bytes
ef6149e
 
 
 
 
 
2d46fda
 
ef6149e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
import gradio as gr
import requests
import os

# Hugging Face API Token (from secrets)
# Read the token from the environment; in HF Spaces this is injected from
# the repository's configured secrets.
API_TOKEN = os.getenv("HF_API_KEY")
if not API_TOKEN:
    # NOTE(review): this only warns — the script continues and the
    # Authorization header below becomes "Bearer None", so every API call
    # will fail with an auth error rather than failing fast here.
    print("Error: API token is missing! Please add it as a secret in Hugging Face Spaces.")

# Model Endpoint
# Hosted-inference URL for the chosen instruct model.
model = "mistralai/Mixtral-8x7B-Instruct-v0.1"  # Use a working instruct model
api_url = f"https://api-inference.huggingface.co/models/{model}"

# Headers
# Bearer-token auth header sent with every inference request.
headers = {"Authorization": f"Bearer {API_TOKEN}"}

# Function to generate email response
def generate_email_response(received_email, additional_context, tone="Professional and Polite"):
    """Draft a reply to a received email via the HF Inference API.

    Parameters
    ----------
    received_email : str
        The full text of the email to respond to.
    additional_context : str
        Extra facts (availability, preferences, etc.) the reply should use.
    tone : str, optional
        Desired tone of the generated reply.

    Returns
    -------
    str
        The generated reply text, or a string starting with "Error: " when
        the request fails for any reason (the Gradio UI displays either).
    """
    # Construct the prompt with additional context
    prompt = f"""
    The following is an email I received:

    {received_email}

    Additional context to consider:
    {additional_context}

    Please draft a response that is {tone}. Keep it concise and appropriate.

    My response:
    """

    # API request payload.
    # FIX: the HF text-generation task expects "max_new_tokens";
    # "max_length" is not a recognized parameter, so the original 400-token
    # cap was silently ignored by the endpoint.
    payload = {
        "inputs": prompt,
        "parameters": {"max_new_tokens": 400, "temperature": 0.7},
    }

    # Send request to HF API; a timeout keeps the UI from hanging forever
    # when the endpoint is cold-starting or unreachable.
    try:
        response = requests.post(api_url, headers=headers, json=payload, timeout=60)
    except requests.RequestException as exc:
        return f"Error: request failed ({exc})"

    # Handle response
    if response.status_code == 200:
        try:
            generated_text = response.json()[0]["generated_text"]
        except (ValueError, KeyError, IndexError, TypeError):
            # Body wasn't the expected [{"generated_text": ...}] shape.
            return "Error: unexpected response format from the API."

        # Trim the response to remove the echoed prompt — the model returns
        # the prompt plus its continuation, so keep only what follows the
        # final "My response:" marker.
        if "My response:" in generated_text:
            generated_text = generated_text.split("My response:")[-1].strip()

        return generated_text

    # Non-200: surface the API's error message when the body is JSON;
    # fall back to the bare status code otherwise (e.g. an HTML 503 page,
    # which would previously crash response.json() with ValueError).
    try:
        return "Error: " + response.json().get("error", "Unknown issue occurred.")
    except ValueError:
        return f"Error: HTTP {response.status_code}"


# Example email
# Sample incoming message used to pre-fill the Gradio Examples widget below.
example_email = """Subject: Request for Collaboration on AI Research

Dear Walid,

I hope you are doing well. My team and I have been following your recent work on hybrid neural networks and FLOPs-aware optimization. We are very interested in collaborating on a research project that explores the application of your techniques in low-resource environments.

Would you be available for a meeting next week to discuss potential collaboration opportunities? Please let us know your availability.

Looking forward to your response.

Best regards,
John Smith
AI Research Lead, Tech Innovations Inc."""

# Example additional context
# Paired with example_email above as the second input of the example row.
example_context = """I am available on Monday, Wednesday, and Friday from 2 PM to 5 PM.
I prefer virtual meetings via Zoom or Google Meet."""

# Gradio Web Interface
# Builds the UI: two input textboxes, a tone selector, an output textbox,
# a button wired to generate_email_response, and one predefined example row.
with gr.Blocks() as demo:
    gr.Markdown("## AI-Powered Email Responder (with Context)")
    gr.Markdown("Enter an email, and the model will generate a response in your selected tone. You can also add additional context, such as your availability.")

    # Side-by-side inputs: the email to answer and the user's own context.
    with gr.Row():
        received_email = gr.Textbox(label="Received Email", lines=10)
        additional_context = gr.Textbox(label="Additional Context (Availability, Preferences, etc.)", lines=5)

    # Tone choices must match the default used by generate_email_response.
    tone = gr.Radio(
        ["Professional and Polite", "Friendly and Casual", "Formal and Concise"],
        label="Tone of Response",
        value="Professional and Polite"
    )

    output_response = gr.Textbox(label="Generated Response", lines=6)

    # Clicking the button calls the generator with the three inputs and
    # writes its return value (reply text or "Error: ..." string) to output.
    generate_button = gr.Button("Generate Response")
    generate_button.click(generate_email_response, inputs=[received_email, additional_context, tone], outputs=output_response)

    # Add example input as a predefined example
    gr.Examples(
        examples=[
            [example_email, example_context, "Professional and Polite"]
        ],
        inputs=[received_email, additional_context, tone],
    )

# Launch the Gradio app
# NOTE(review): launches unconditionally on import (no __main__ guard) —
# intentional for HF Spaces, where the file is executed as the app script.
demo.launch()