Spaces:
Running
Running
import os

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Model repo to load; override via the MODEL_ID env var (e.g. Space settings).
MODEL_ID = os.getenv("MODEL_ID", "LiquidAI/LFM2.5-1.2B-Instruct")
# Optional HF access token for gated/private models; None = anonymous access.
HF_TOKEN = os.getenv("HF_TOKEN", None)

# Cache model/tokenizer so Space loads once
_tokenizer = None
_model = None
def load_llm():
    """Return the cached (tokenizer, model) pair, loading them on first use.

    The first call downloads and initializes the model; every later call
    returns the memoized objects so the Space pays the load cost only once.

    Returns:
        tuple: (tokenizer, model) — the shared Hugging Face tokenizer and
        causal-LM in eval mode.
    """
    global _tokenizer, _model
    # Fast path: both already loaded.
    if _tokenizer is not None and _model is not None:
        return _tokenizer, _model
    _tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, token=HF_TOKEN)
    # device_map="auto" places weights on GPU if available, else CPU.
    _model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        token=HF_TOKEN,
        torch_dtype="auto",
        device_map="auto",
    )
    _model.eval()
    return _tokenizer, _model
def build_messages(
    negotiation_type: str,
    your_role: str,
    counterpart_role: str,
    relationship: str,
    goal: str,
    context: str,
    constraints: str,
    concessions: str,
    deadline: str,
    tone: str,
    include_subject: bool,
    signature_name: str,
    email_thread: str,
):
    """Build the chat message list (system + user) for the negotiation draft.

    All user-supplied fields are interpolated verbatim into the user prompt;
    the system prompt constrains tone and forbids invented facts.

    Returns:
        list[dict]: two messages in Hugging Face chat format,
        [{"role": "system", ...}, {"role": "user", ...}].
    """
    # Hoisted out of the f-string below: a backslash anywhere inside an
    # f-string expression is a SyntaxError before Python 3.12, so the
    # original inline conditional failed to parse on older interpreters.
    if include_subject:
        # NOTE(review): "\\n\\n" reproduces the original's literal
        # backslash-n text in the prompt — confirm whether real newlines
        # were intended here.
        subject_rule = "- First line must be a subject line like: Subject: <...>\\n\\nThen the email body."
    else:
        subject_rule = "- Output ONLY the email body (no subject line)."

    # Negotiation-focused system prompt
    system = f"""
You are an email drafting assistant specialized in negotiation emails.
Rules:
- Keep the tone: {tone}.
- Be respectful and professional. No threats, no insults, no pressure tactics.
- Use principled negotiation: be clear on interests, propose options, ask focused questions.
- Do not invent facts, dates, prices, legal terms, or policy. Use only what the user provides.
- Keep paragraphs short.
Output format:
{subject_rule}
End with a sign-off using the provided name if given.
""".strip()
    user = f"""
Create a negotiation email draft.
Negotiation type: {negotiation_type}
Your role: {your_role}
Counterpart role: {counterpart_role}
Relationship/context: {relationship}
Goal / Ask:
{goal}
Background details (facts only):
{context}
Constraints / non-negotiables:
{constraints}
Possible concessions / flexibility:
{concessions}
Deadline / timing:
{deadline}
Existing email thread (if any):
{email_thread}
Signature name:
{signature_name}
""".strip()
    return [
        {"role": "system", "content": system},
        {"role": "user", "content": user},
    ]
def generate_draft(
    negotiation_type,
    your_role,
    counterpart_role,
    relationship,
    goal,
    context,
    constraints,
    concessions,
    deadline,
    tone,
    include_subject,
    signature_name,
    email_thread,
    max_new_tokens,
    temperature,
    top_p,
    top_k,
    repetition_penalty,
):
    """Generate a negotiation email draft from the UI field values.

    Builds the chat prompt, runs the cached LFM2 model, decodes only the
    newly generated tokens, and appends a sign-off if the model omitted it.

    Returns:
        str: the drafted email text.
    """
    tokenizer, model = load_llm()
    messages = build_messages(
        negotiation_type=negotiation_type,
        your_role=your_role,
        counterpart_role=counterpart_role,
        relationship=relationship,
        goal=goal,
        context=context,
        constraints=constraints,
        concessions=concessions,
        deadline=deadline,
        tone=tone,
        include_subject=include_subject,
        signature_name=signature_name,
        email_thread=email_thread,
    )
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(prompt, return_tensors="pt")
    inputs = {k: v.to(model.device) for k, v in inputs.items()}
    # BUG FIX: the UI slider allows temperature == 0.0, but transformers
    # raises if do_sample=True with temperature 0 — fall back to greedy
    # decoding (and drop the sampling-only knobs) in that case.
    do_sample = float(temperature) > 0.0
    gen_kwargs = dict(
        max_new_tokens=int(max_new_tokens),
        do_sample=do_sample,
        repetition_penalty=float(repetition_penalty),
    )
    if do_sample:
        gen_kwargs.update(
            temperature=float(temperature),
            top_p=float(top_p),
            top_k=int(top_k),
        )
    with torch.no_grad():
        out = model.generate(**inputs, **gen_kwargs)
    # Decode only the tokens generated after the prompt.
    text = tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()
    return _ensure_signoff(text, signature_name)


def _ensure_signoff(text, signature_name):
    """Append a "Regards" sign-off if a name was given and the model forgot it.

    Only the last 150 characters are checked, so a name mentioned earlier in
    the body does not suppress the sign-off.
    """
    name = (signature_name or "").strip()
    if name and name not in text[-150:]:
        text = text.rstrip() + f"\n\nRegards,\n{name}"
    return text
# ---- Gradio UI (built at import time; `demo` is the Space entry point) ----
with gr.Blocks(title="Negotiation Email Draft Tool (LFM2)") as demo:
    gr.Markdown(
        "# Negotiation Email Draft Tool (Powered by LFM2)\n"
        "Fill the fields, then generate a negotiation-ready email draft.\n"
        "Note: Output is a draft. Verify details before sending."
    )
    # Row 1: what is being negotiated and in what tone.
    with gr.Row():
        negotiation_type = gr.Dropdown(
            ["Salary/Offer", "Vendor Pricing", "Contract Terms", "Rent/Lease", "Timeline/Delivery", "Refund/Resolution", "Other"],
            value="Vendor Pricing",
            label="Negotiation Type",
        )
        tone = gr.Dropdown(
            ["Warm and collaborative", "Firm but polite", "Direct and concise", "Formal and cautious"],
            value="Firm but polite",
            label="Tone",
        )
    # Row 2: the two parties' roles.
    with gr.Row():
        your_role = gr.Dropdown(
            ["Buyer/Client", "Seller/Vendor", "Candidate/Employee", "Employer", "Tenant", "Landlord", "Other"],
            value="Buyer/Client",
            label="Your Role",
        )
        counterpart_role = gr.Dropdown(
            ["Buyer/Client", "Seller/Vendor", "Recruiter/Employer", "Candidate/Employee", "Tenant", "Landlord", "Other"],
            value="Seller/Vendor",
            label="Counterpart Role",
        )
    # Free-text fields interpolated verbatim into the prompt by build_messages.
    relationship = gr.Textbox(label="Relationship Context (1–2 lines)", placeholder="Existing vendor relationship, new negotiation, prior discussions, etc.")
    goal = gr.Textbox(label="Goal / Ask (be specific)", lines=3, placeholder="Example: Request 12% price reduction or extended payment terms (Net 45).")
    context = gr.Textbox(label="Background Facts", lines=4, placeholder="Facts you want included (numbers, dates, scope, current offer, etc).")
    constraints = gr.Textbox(label="Constraints / Non-Negotiables", lines=3, placeholder="Example: Budget cap, delivery deadline, must-have clause.")
    concessions = gr.Textbox(label="Possible Concessions", lines=3, placeholder="Example: Longer contract term, faster payment, higher volume.")
    deadline = gr.Textbox(label="Deadline / Timing", placeholder="Example: Need confirmation by Friday EOD.")
    email_thread = gr.Textbox(label="Existing Email Thread (optional)", lines=6, placeholder="Paste prior messages here if this is a reply.")
    signature_name = gr.Textbox(label="Signature Name (optional)", placeholder="Your name")
    include_subject = gr.Checkbox(value=True, label="Include subject line")
    with gr.Accordion("Generation settings (advanced)", open=False):
        max_new_tokens = gr.Slider(120, 600, value=280, step=10, label="Max new tokens")
        # Model card recommends low temperature/top_p for controlled output.
        temperature = gr.Slider(0.0, 1.0, value=0.1, step=0.05, label="Temperature")
        top_p = gr.Slider(0.05, 1.0, value=0.1, step=0.05, label="Top-p")
        top_k = gr.Slider(0, 200, value=50, step=5, label="Top-k")
        repetition_penalty = gr.Slider(1.0, 1.3, value=1.05, step=0.01, label="Repetition penalty")
    btn = gr.Button("Generate Draft")
    output = gr.Textbox(label="Draft Email", lines=14)
    # Wire the button: the inputs order must match generate_draft's signature.
    btn.click(
        fn=generate_draft,
        inputs=[
            negotiation_type, your_role, counterpart_role, relationship, goal, context,
            constraints, concessions, deadline, tone, include_subject, signature_name,
            email_thread, max_new_tokens, temperature, top_p, top_k, repetition_penalty
        ],
        outputs=[output],
    )

# queue() enables request queuing so concurrent users share the model safely.
demo.queue().launch()