Israa-M committed on
Commit
9a9150c
·
verified ·
1 Parent(s): 614bb71

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -49
app.py CHANGED
@@ -1,12 +1,14 @@
1
  import os
2
- import re
3
  import gradio as gr
4
  from huggingface_hub import InferenceClient
5
 
 
6
  HF_TOKEN = os.getenv("HF_TOKEN", "")
 
 
7
  DEFAULT_MODEL = os.getenv("HF_MODEL", "Qwen/Qwen2.5-72B-Instruct")
8
 
9
- MAX_TOKENS = 1400
10
  TEMPERATURE = 0.4
11
 
12
  SYSTEM_INSTRUCTIONS = """
@@ -37,11 +39,10 @@ Under Message Examples:
37
  - One decision-maker message
38
 
39
  Tone: senior AE speaking to peers. No emojis.
40
- """
41
 
42
  USER_TEMPLATE = """
43
- Account (name or org URL):
44
- {account}
45
 
46
  Optional context:
47
  Industry: {industry}
@@ -55,36 +56,47 @@ Notes: {notes}
55
  Task:
56
  Create a structured outbound plan for this account based on public
57
  open-source platform usage and OSS → Enterprise conversion patterns.
58
- """
 
59
 
60
- def call_model(prompt, model):
 
61
  if not HF_TOKEN:
62
- return "Missing HF_TOKEN. Add it in Space Settings → Secrets."
 
 
 
 
 
 
63
 
64
  client = InferenceClient(model=model, token=HF_TOKEN)
65
 
66
- response = client.chat.completions.create(
67
- messages=[
68
- {"role": "system", "content": SYSTEM_INSTRUCTIONS},
69
- {"role": "user", "content": prompt},
70
- ],
71
- temperature=TEMPERATURE,
72
- max_tokens=MAX_TOKENS,
73
- )
74
-
75
- return response.choices[0].message.content.strip()
76
-
77
- def generate(
78
- account,
79
- industry,
80
- region,
81
- primary_persona,
82
- secondary_personas,
83
- offer_focus,
84
- goal,
85
- notes,
86
- model,
87
- ):
 
 
 
88
  if not account:
89
  return "Please input an account name or org URL."
90
 
@@ -99,7 +111,9 @@ def generate(
99
  notes=notes or "Not provided",
100
  )
101
 
102
- return call_model(prompt, model or DEFAULT_MODEL)
 
 
103
 
104
  with gr.Blocks(title="Enterprise OSS → Outbound Ops") as app:
105
  gr.Markdown(
@@ -107,7 +121,7 @@ with gr.Blocks(title="Enterprise OSS → Outbound Ops") as app:
107
  "Input an **account name or public org URL** to generate an enterprise outbound plan."
108
  )
109
 
110
- account = gr.Textbox(label="Account name or org URL")
111
 
112
  with gr.Row():
113
  industry = gr.Textbox(label="Industry (optional)")
@@ -121,29 +135,16 @@ with gr.Blocks(title="Enterprise OSS → Outbound Ops") as app:
121
  goal = gr.Textbox(label="Goal (optional)")
122
  notes = gr.Textbox(label="Notes / known public signals (optional)", lines=4)
123
 
124
- model = gr.Textbox(
125
- label="Model",
126
- value=DEFAULT_MODEL,
127
- help="Change only if you know what you're doing",
128
- )
129
 
130
  run = gr.Button("Generate outbound plan")
131
  output = gr.Markdown()
132
 
133
  run.click(
134
  fn=generate,
135
- inputs=[
136
- account,
137
- industry,
138
- region,
139
- primary_persona,
140
- secondary_personas,
141
- offer_focus,
142
- goal,
143
- notes,
144
- model,
145
- ],
146
  outputs=output,
147
  )
148
 
149
- app.launch()
 
 
1
  import os
 
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
4
 
5
+ # In Space Settings → Secrets, add HF_TOKEN
6
  HF_TOKEN = os.getenv("HF_TOKEN", "")
7
+
8
+ # You can also add HF_MODEL in Secrets; otherwise use a default.
9
  DEFAULT_MODEL = os.getenv("HF_MODEL", "Qwen/Qwen2.5-72B-Instruct")
10
 
11
+ MAX_NEW_TOKENS = 1400
12
  TEMPERATURE = 0.4
13
 
14
  SYSTEM_INSTRUCTIONS = """
 
39
  - One decision-maker message
40
 
41
  Tone: senior AE speaking to peers. No emojis.
42
+ """.strip()
43
 
44
  USER_TEMPLATE = """
45
+ Account (name or org URL): {account}
 
46
 
47
  Optional context:
48
  Industry: {industry}
 
56
  Task:
57
  Create a structured outbound plan for this account based on public
58
  open-source platform usage and OSS → Enterprise conversion patterns.
59
+ """.strip()
60
+
61
 
62
+ def call_model(prompt: str, model: str) -> str:
63
+ # IMPORTANT: Don't crash the Space if the token is missing.
64
  if not HF_TOKEN:
65
+ return (
66
+ "Missing HF_TOKEN.\n\n"
67
+ "Fix:\n"
68
+ "Space → Settings → Secrets → Add:\n"
69
+ "- Name: HF_TOKEN\n"
70
+ "- Value: your Hugging Face token\n"
71
+ )
72
 
73
  client = InferenceClient(model=model, token=HF_TOKEN)
74
 
75
+ # Most-compatible approach across endpoints:
76
+ full_prompt = f"{SYSTEM_INSTRUCTIONS}\n\n{prompt}\n\nReturn the output using the required 6 headings."
77
+
78
+ try:
79
+ out = client.text_generation(
80
+ full_prompt,
81
+ max_new_tokens=MAX_NEW_TOKENS,
82
+ temperature=TEMPERATURE,
83
+ do_sample=True,
84
+ return_full_text=False,
85
+ )
86
+ return (out or "").strip()
87
+ except Exception as e:
88
+ return (
89
+ "Model call failed.\n\n"
90
+ f"Model: {model}\n"
91
+ f"Error: {repr(e)}\n\n"
92
+ "Try:\n"
93
+ "1) Set a different HF_MODEL (an instruct model you have access to)\n"
94
+ "2) Or use a smaller model for reliability\n"
95
+ )
96
+
97
+
98
+ def generate(account, industry, region, primary_persona, secondary_personas, offer_focus, goal, notes, model):
99
+ account = (account or "").strip()
100
  if not account:
101
  return "Please input an account name or org URL."
102
 
 
111
  notes=notes or "Not provided",
112
  )
113
 
114
+ model = (model or DEFAULT_MODEL).strip()
115
+ return call_model(prompt, model)
116
+
117
 
118
  with gr.Blocks(title="Enterprise OSS → Outbound Ops") as app:
119
  gr.Markdown(
 
121
  "Input an **account name or public org URL** to generate an enterprise outbound plan."
122
  )
123
 
124
+ account = gr.Textbox(label="Account name or org URL", placeholder="e.g., Emirates or https://huggingface.co/emirates")
125
 
126
  with gr.Row():
127
  industry = gr.Textbox(label="Industry (optional)")
 
135
  goal = gr.Textbox(label="Goal (optional)")
136
  notes = gr.Textbox(label="Notes / known public signals (optional)", lines=4)
137
 
138
+ model = gr.Textbox(label="Model", value=DEFAULT_MODEL)
 
 
 
 
139
 
140
  run = gr.Button("Generate outbound plan")
141
  output = gr.Markdown()
142
 
143
  run.click(
144
  fn=generate,
145
+ inputs=[account, industry, region, primary_persona, secondary_personas, offer_focus, goal, notes, model],
 
 
 
 
 
 
 
 
 
 
146
  outputs=output,
147
  )
148
 
149
+ # IMPORTANT for Hugging Face Spaces:
150
+ app.launch(server_name="0.0.0.0", server_port=7860)