Israa-M committed on
Commit
bac0e51
·
verified ·
1 Parent(s): 52701e7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -61
app.py CHANGED
@@ -1,31 +1,24 @@
1
  import os
2
  import gradio as gr
3
- from huggingface_hub import InferenceClient
4
 
5
- # In Space Settings → Secrets, add HF_TOKEN
6
  HF_TOKEN = os.getenv("HF_TOKEN", "")
 
7
 
8
- # You can also add HF_MODEL in Secrets; otherwise use a default.
9
- DEFAULT_MODEL = os.getenv("HF_MODEL", "Qwen/Qwen2.5-72B-Instruct")
10
-
11
- MAX_NEW_TOKENS = 1400
12
- TEMPERATURE = 0.4
13
 
14
  SYSTEM_INSTRUCTIONS = """
15
- You are a senior enterprise Account Executive specializing in converting
16
- open-source platform usage into enterprise-grade commercial engagements.
17
-
18
- You do NOT represent any specific vendor.
19
- You do NOT claim access to private or internal data.
20
 
21
- Hard rules:
22
- - Anchor everything in public signals
23
- - Separate technical users from decision makers
24
- - Be practical and executable
25
- - Avoid generic SaaS language
26
- - If information is missing, state assumptions clearly
27
 
28
- You MUST output exactly these sections:
29
 
30
  1. Account Snapshot
31
  2. Usage Hypotheses
@@ -54,52 +47,23 @@ Goal: {goal}
54
  Notes: {notes}
55
 
56
  Task:
57
- Create a structured outbound plan for this account based on public
58
- open-source platform usage and OSS → Enterprise conversion patterns.
59
  """.strip()
60
 
61
 
62
- def call_model(prompt: str, model: str) -> str:
63
- # IMPORTANT: Don't crash the Space if the token is missing.
 
 
 
64
  if not HF_TOKEN:
65
  return (
66
  "Missing HF_TOKEN.\n\n"
67
  "Fix:\n"
68
- "Space → Settings → Secrets → Add:\n"
69
- "- Name: HF_TOKEN\n"
70
- "- Value: your Hugging Face token\n"
71
  )
72
 
73
- client = InferenceClient(model=model, token=HF_TOKEN, base_url="https://router.huggingface.co")
74
-
75
-
76
- # Most-compatible approach across endpoints:
77
- full_prompt = f"{SYSTEM_INSTRUCTIONS}\n\n{prompt}\n\nReturn the output using the required 6 headings."
78
-
79
- try:
80
- out = client.text_generation(
81
- full_prompt,
82
- max_new_tokens=MAX_NEW_TOKENS,
83
- temperature=TEMPERATURE,
84
- do_sample=True,
85
- return_full_text=False,
86
- )
87
- return (out or "").strip()
88
- except Exception as e:
89
- return (
90
- "Model call failed.\n\n"
91
- f"Model: {model}\n"
92
- f"Error: {repr(e)}\n\n"
93
- "Try:\n"
94
- "1) Set a different HF_MODEL (an instruct model you have access to)\n"
95
- "2) Or use a smaller model for reliability\n"
96
- )
97
-
98
-
99
- def generate(account, industry, region, primary_persona, secondary_personas, offer_focus, goal, notes, model):
100
- account = (account or "").strip()
101
- if not account:
102
- return "Please input an account name or org URL."
103
 
104
  prompt = USER_TEMPLATE.format(
105
  account=account,
@@ -108,21 +72,41 @@ def generate(account, industry, region, primary_persona, secondary_personas, off
108
  primary_persona=primary_persona or "Not provided",
109
  secondary_personas=secondary_personas or "Not provided",
110
  offer_focus=offer_focus or "Not provided",
111
- goal=goal or "Convert free usage to enterprise engagement",
112
  notes=notes or "Not provided",
113
  )
114
 
115
  model = (model or DEFAULT_MODEL).strip()
116
- return call_model(prompt, model)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
117
 
118
 
119
  with gr.Blocks(title="Enterprise OSS → Outbound Ops") as app:
120
  gr.Markdown(
121
  "# Enterprise OSS → Outbound Ops\n"
122
- "Input an **account name or public org URL** to generate an enterprise outbound plan."
123
  )
124
 
125
- account = gr.Textbox(label="Account name or org URL", placeholder="e.g., Emirates or https://huggingface.co/emirates")
126
 
127
  with gr.Row():
128
  industry = gr.Textbox(label="Industry (optional)")
@@ -147,5 +131,4 @@ with gr.Blocks(title="Enterprise OSS → Outbound Ops") as app:
147
  outputs=output,
148
  )
149
 
150
- # IMPORTANT for Hugging Face Spaces:
151
  app.launch(server_name="0.0.0.0", server_port=7860)
 
1
  import os
2
  import gradio as gr
3
+ from openai import OpenAI
4
 
 
5
  HF_TOKEN = os.getenv("HF_TOKEN", "")
6
+ DEFAULT_MODEL = os.getenv("HF_MODEL", "Qwen/Qwen2.5-7B-Instruct")
7
 
8
+ # Hugging Face Inference Providers router (OpenAI-compatible)
9
+ # Docs: base_url="https://router.huggingface.co/v1"
10
+ ROUTER_BASE_URL = "https://router.huggingface.co/v1"
 
 
11
 
12
  SYSTEM_INSTRUCTIONS = """
13
+ You are a senior enterprise Account Executive specializing in converting open-source platform usage into enterprise-grade commercial engagements.
 
 
 
 
14
 
15
+ Rules:
16
+ - Do NOT represent any specific vendor.
17
+ - Do NOT claim access to private/internal data.
18
+ - Anchor everything in public signals and state assumptions clearly.
19
+ - Avoid generic SaaS buzzwords. Be executable.
 
20
 
21
+ You MUST output exactly these headings:
22
 
23
  1. Account Snapshot
24
  2. Usage Hypotheses
 
47
  Notes: {notes}
48
 
49
  Task:
50
+ Create a structured outbound plan for this account based on public open-source platform usage and OSS → Enterprise conversion patterns.
 
51
  """.strip()
52
 
53
 
54
+ def generate(account, industry, region, primary_persona, secondary_personas, offer_focus, goal, notes, model):
55
+ account = (account or "").strip()
56
+ if not account:
57
+ return "Please input an account name or org URL."
58
+
59
  if not HF_TOKEN:
60
  return (
61
  "Missing HF_TOKEN.\n\n"
62
  "Fix:\n"
63
+ "Space → Settings → Secrets → add HF_TOKEN with permission: “Make calls to Inference Providers”."
 
 
64
  )
65
 
66
+ client = OpenAI(base_url=ROUTER_BASE_URL, api_key=HF_TOKEN)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
 
68
  prompt = USER_TEMPLATE.format(
69
  account=account,
 
72
  primary_persona=primary_persona or "Not provided",
73
  secondary_personas=secondary_personas or "Not provided",
74
  offer_focus=offer_focus or "Not provided",
75
+ goal=goal or "Convert free/community usage to enterprise engagement",
76
  notes=notes or "Not provided",
77
  )
78
 
79
  model = (model or DEFAULT_MODEL).strip()
80
+
81
+ try:
82
+ resp = client.chat.completions.create(
83
+ model=model,
84
+ messages=[
85
+ {"role": "system", "content": SYSTEM_INSTRUCTIONS},
86
+ {"role": "user", "content": prompt},
87
+ ],
88
+ temperature=0.4,
89
+ max_tokens=1400,
90
+ )
91
+ return resp.choices[0].message.content.strip()
92
+ except Exception as e:
93
+ return (
94
+ "Model call failed.\n\n"
95
+ f"Model: {model}\n"
96
+ f"Error: {repr(e)}\n\n"
97
+ "Try switching the model to a smaller instruct model you have access to, e.g.:\n"
98
+ "- Qwen/Qwen2.5-7B-Instruct\n"
99
+ "- mistralai/Mistral-7B-Instruct-v0.3\n"
100
+ )
101
 
102
 
103
  with gr.Blocks(title="Enterprise OSS → Outbound Ops") as app:
104
  gr.Markdown(
105
  "# Enterprise OSS → Outbound Ops\n"
106
+ "Input an **account name or org URL** to generate a structured enterprise outbound plan."
107
  )
108
 
109
+ account = gr.Textbox(label="Account name or org URL", placeholder="e.g., Emirates OR https://huggingface.co/emirates")
110
 
111
  with gr.Row():
112
  industry = gr.Textbox(label="Industry (optional)")
 
131
  outputs=output,
132
  )
133
 
 
134
  app.launch(server_name="0.0.0.0", server_port=7860)