File size: 7,787 Bytes
5b4e64e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
import os
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Model repo id; override with the MODEL_ID env var (e.g. in Space settings).
MODEL_ID = os.getenv("MODEL_ID", "LiquidAI/LFM2.5-1.2B-Instruct")
# Optional Hugging Face access token for gated/private models; None means anonymous.
HF_TOKEN = os.getenv("HF_TOKEN", None)

# Module-level cache: tokenizer/model are loaded once per process so the
# Space does not re-download and re-load weights on every request.
_tokenizer = None
_model = None

def load_llm():
    """Lazily load and cache the tokenizer/model pair.

    On the first call, downloads/loads the tokenizer and model named by
    ``MODEL_ID`` and stores them in the module-level cache; subsequent
    calls return the cached pair.

    Returns:
        tuple: ``(tokenizer, model)`` ready for generation.
    """
    global _tokenizer, _model
    if _tokenizer is not None and _model is not None:
        return _tokenizer, _model

    _tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, token=HF_TOKEN)

    # device_map="auto" places weights on GPU when available, else CPU.
    _model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        token=HF_TOKEN,
        torch_dtype="auto",
        device_map="auto",
    )
    _model.eval()
    return _tokenizer, _model


def build_messages(
    negotiation_type: str,
    your_role: str,
    counterpart_role: str,
    relationship: str,
    goal: str,
    context: str,
    constraints: str,
    concessions: str,
    deadline: str,
    tone: str,
    include_subject: bool,
    signature_name: str,
    email_thread: str,
):
    """Build the system + user chat messages for the negotiation draft.

    All user-supplied fields are interpolated verbatim into the user
    message; the system message fixes tone and output format. The prompt
    text previously contained paste-mangled doubled blank lines, which
    are removed here so the model receives a compact prompt.

    Returns:
        list[dict]: ``[{"role": "system", ...}, {"role": "user", ...}]``
        suitable for ``tokenizer.apply_chat_template``.
    """
    # Subject-line instruction depends on the UI checkbox.
    subject_rule = (
        "- First line must be a subject line like: Subject: <...>\\n\\nThen the email body."
        if include_subject
        else "- Output ONLY the email body (no subject line)."
    )

    # Negotiation-focused system prompt.
    system = f"""You are an email drafting assistant specialized in negotiation emails.

Rules:
- Keep the tone: {tone}.
- Be respectful and professional. No threats, no insults, no pressure tactics.
- Use principled negotiation: be clear on interests, propose options, ask focused questions.
- Do not invent facts, dates, prices, legal terms, or policy. Use only what the user provides.
- Keep paragraphs short.

Output format:
{subject_rule}
End with a sign-off using the provided name if given.""".strip()

    user = f"""Create a negotiation email draft.

Negotiation type: {negotiation_type}
Your role: {your_role}
Counterpart role: {counterpart_role}
Relationship/context: {relationship}

Goal / Ask:
{goal}

Background details (facts only):
{context}

Constraints / non-negotiables:
{constraints}

Possible concessions / flexibility:
{concessions}

Deadline / timing:
{deadline}

Existing email thread (if any):
{email_thread}

Signature name:
{signature_name}""".strip()

    return [
        {"role": "system", "content": system},
        {"role": "user", "content": user},
    ]


def generate_draft(
    negotiation_type,
    your_role,
    counterpart_role,
    relationship,
    goal,
    context,
    constraints,
    concessions,
    deadline,
    tone,
    include_subject,
    signature_name,
    email_thread,
    max_new_tokens,
    temperature,
    top_p,
    top_k,
    repetition_penalty,
):
    """Generate a negotiation email draft from the UI fields.

    Builds the chat prompt, runs the cached model, decodes only the newly
    generated tokens, and appends a fallback sign-off if the model omitted
    the provided signature name.

    Returns:
        str: the draft email text.
    """
    tokenizer, model = load_llm()

    messages = build_messages(
        negotiation_type=negotiation_type,
        your_role=your_role,
        counterpart_role=counterpart_role,
        relationship=relationship,
        goal=goal,
        context=context,
        constraints=constraints,
        concessions=concessions,
        deadline=deadline,
        tone=tone,
        include_subject=include_subject,
        signature_name=signature_name,
        email_thread=email_thread,
    )

    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

    inputs = tokenizer(prompt, return_tensors="pt")
    inputs = {k: v.to(model.device) for k, v in inputs.items()}

    # Bug fix: transformers raises ValueError for temperature <= 0 when
    # do_sample=True, and the UI slider allows 0.0. Fall back to greedy
    # decoding in that case and only pass sampling kwargs when sampling.
    do_sample = float(temperature) > 0.0
    gen_kwargs = {
        "max_new_tokens": int(max_new_tokens),
        "do_sample": do_sample,
        "repetition_penalty": float(repetition_penalty),
    }
    if do_sample:
        gen_kwargs["temperature"] = float(temperature)
        gen_kwargs["top_p"] = float(top_p)
        gen_kwargs["top_k"] = int(top_k)

    with torch.no_grad():
        out = model.generate(**inputs, **gen_kwargs)

    # Decode only the tokens generated after the prompt.
    text = tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

    # Ensure sign-off if the user provided a name and the model forgot it
    # (only the tail is checked so a name mentioned early doesn't count).
    if signature_name and signature_name.strip() and signature_name.strip() not in text[-150:]:
        text = text.rstrip() + f"\n\nRegards,\n{signature_name.strip()}"

    return text


# ---------------------------------------------------------------------------
# Gradio UI: form inputs -> generate_draft -> draft text output.
# ---------------------------------------------------------------------------
with gr.Blocks(title="Negotiation Email Draft Tool (LFM2)") as demo:
    gr.Markdown(
        "# Negotiation Email Draft Tool (Powered by LFM2)\n"
        "Fill the fields, then generate a negotiation-ready email draft.\n"
        "Note: Output is a draft. Verify details before sending."
    )

    with gr.Row():
        negotiation_type = gr.Dropdown(
            ["Salary/Offer", "Vendor Pricing", "Contract Terms", "Rent/Lease", "Timeline/Delivery", "Refund/Resolution", "Other"],
            value="Vendor Pricing",
            label="Negotiation Type",
        )
        tone = gr.Dropdown(
            ["Warm and collaborative", "Firm but polite", "Direct and concise", "Formal and cautious"],
            value="Firm but polite",
            label="Tone",
        )

    with gr.Row():
        your_role = gr.Dropdown(
            ["Buyer/Client", "Seller/Vendor", "Candidate/Employee", "Employer", "Tenant", "Landlord", "Other"],
            value="Buyer/Client",
            label="Your Role",
        )
        counterpart_role = gr.Dropdown(
            ["Buyer/Client", "Seller/Vendor", "Recruiter/Employer", "Candidate/Employee", "Tenant", "Landlord", "Other"],
            value="Seller/Vendor",
            label="Counterpart Role",
        )

    # Free-text fields feeding build_messages; all are passed through verbatim.
    relationship = gr.Textbox(label="Relationship Context (1–2 lines)", placeholder="Existing vendor relationship, new negotiation, prior discussions, etc.")
    goal = gr.Textbox(label="Goal / Ask (be specific)", lines=3, placeholder="Example: Request 12% price reduction or extended payment terms (Net 45).")
    context = gr.Textbox(label="Background Facts", lines=4, placeholder="Facts you want included (numbers, dates, scope, current offer, etc).")
    constraints = gr.Textbox(label="Constraints / Non-Negotiables", lines=3, placeholder="Example: Budget cap, delivery deadline, must-have clause.")
    concessions = gr.Textbox(label="Possible Concessions", lines=3, placeholder="Example: Longer contract term, faster payment, higher volume.")
    deadline = gr.Textbox(label="Deadline / Timing", placeholder="Example: Need confirmation by Friday EOD.")
    email_thread = gr.Textbox(label="Existing Email Thread (optional)", lines=6, placeholder="Paste prior messages here if this is a reply.")
    signature_name = gr.Textbox(label="Signature Name (optional)", placeholder="Your name")

    include_subject = gr.Checkbox(value=True, label="Include subject line")
    with gr.Accordion("Generation settings (advanced)", open=False):
        max_new_tokens = gr.Slider(120, 600, value=280, step=10, label="Max new tokens")
        # Low temperature/top_p defaults follow the model card's recommendation
        # for controlled, deterministic-leaning output.
        temperature = gr.Slider(0.0, 1.0, value=0.1, step=0.05, label="Temperature")
        top_p = gr.Slider(0.05, 1.0, value=0.1, step=0.05, label="Top-p")
        top_k = gr.Slider(0, 200, value=50, step=5, label="Top-k")
        repetition_penalty = gr.Slider(1.0, 1.3, value=1.05, step=0.01, label="Repetition penalty")

    btn = gr.Button("Generate Draft")
    output = gr.Textbox(label="Draft Email", lines=14)

    # Input order must match generate_draft's parameter order exactly.
    btn.click(
        fn=generate_draft,
        inputs=[
            negotiation_type, your_role, counterpart_role, relationship, goal, context,
            constraints, concessions, deadline, tone, include_subject, signature_name,
            email_thread, max_new_tokens, temperature, top_p, top_k, repetition_penalty
        ],
        outputs=[output],
    )

# queue() enables request queuing so concurrent users don't collide on the model.
demo.queue().launch()