SanaAdeel commited on
Commit
b1b97e5
·
verified ·
1 Parent(s): 0c20979

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -29
app.py CHANGED
@@ -1,62 +1,75 @@
 
1
  import gradio as gr
 
2
  from huggingface_hub import InferenceClient
3
 
 
 
 
 
4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  def respond(
6
- message,
7
  history: list[dict[str, str]],
8
- system_message,
9
- max_tokens,
10
- temperature,
11
- top_p,
12
  hf_token: gr.OAuthToken,
13
  ):
14
- """
15
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
16
- """
 
 
17
  client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
18
 
19
  messages = [{"role": "system", "content": system_message}]
20
-
21
  messages.extend(history)
22
-
23
  messages.append({"role": "user", "content": message})
24
 
25
  response = ""
26
-
27
- for message in client.chat_completion(
28
  messages,
29
- max_tokens=max_tokens,
30
  stream=True,
31
  temperature=temperature,
32
  top_p=top_p,
33
  ):
34
- choices = message.choices
35
  token = ""
36
  if len(choices) and choices[0].delta.content:
37
  token = choices[0].delta.content
38
-
39
  response += token
40
- yield response
41
 
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
  chatbot = gr.ChatInterface(
47
  respond,
48
  type="messages",
49
  additional_inputs=[
50
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
51
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
52
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
53
- gr.Slider(
54
- minimum=0.1,
55
- maximum=1.0,
56
- value=0.95,
57
- step=0.05,
58
- label="Top-p (nucleus sampling)",
59
- ),
60
  ],
61
  )
62
 
@@ -65,6 +78,5 @@ with gr.Blocks() as demo:
65
  gr.LoginButton()
66
  chatbot.render()
67
 
68
-
69
  if __name__ == "__main__":
70
  demo.launch()
 
1
+ import os
2
  import gradio as gr
3
+ import requests
4
  from huggingface_hub import InferenceClient
5
 
6
# --- Load secrets from Hugging Face environment
# Secret API key for Paywalls.ai; None when the Space secret is not configured
# (check_paywall treats that as "no premium access").
PAYWALLS_API_KEY = os.environ.get("PAYWALLS_API_KEY")  # Paywalls.ai secret
# Base URL of the Paywalls.ai REST API; overridable via env for testing/staging.
PAYWALLS_API_URL = os.environ.get("PAYWALLS_API_URL", "https://api.paywalls.ai/v1")
# If your GPT model requires HF_TOKEN as secret, it will be passed via OAuthToken in Gradio; don't hard-code.
10
 
11
# --- Paywall Check Utility
def check_paywall(user_id: str, feature_name: str = "premium_generation") -> bool:
    """Ask the Paywalls.ai API whether *user_id* may use *feature_name*.

    Returns True only when the API explicitly grants access. Any failure —
    missing API key, network error, non-2xx response, or malformed JSON —
    is treated as "no access" so the app degrades to the free tier instead
    of crashing.
    """
    if not PAYWALLS_API_KEY:
        # Fail closed: without a configured secret we would otherwise send
        # "Authorization: Bearer None" and misinterpret the error response.
        print("[Paywall] PAYWALLS_API_KEY is not set; denying premium access.")
        return False

    headers = {"Authorization": f"Bearer {PAYWALLS_API_KEY}"}
    payload = {"user_id": user_id, "feature": feature_name}
    try:
        resp = requests.post(
            f"{PAYWALLS_API_URL}/access", json=payload, headers=headers, timeout=10
        )
        # A non-2xx body may still be valid JSON (an error object) — never
        # treat it as a grant.
        resp.raise_for_status()
        data = resp.json()
        return bool(data.get("access_granted", False))
    except Exception as e:  # deliberate best-effort: any integration error -> free tier
        print(f"[Paywall] Integration error: {e}")
        return False
27
+
28
# --- Chatbot Logic with Paywall Gate
def respond(
    message: str,
    history: list[dict[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
    hf_token: gr.OAuthToken,
):
    """Stream a chat completion, capping output length for non-premium users.

    Yields the accumulated response text after every streamed chunk so the
    Gradio UI can render it incrementally. Free-tier users get a reduced
    token budget and an upgrade prompt appended to each yield.
    """
    user_id = "demo_user"  # For demo, static value; Replace with account/session for real use
    premium = check_paywall(user_id, feature_name="premium_generation")
    # Free tier is capped at 120 new tokens (never above what the user asked for).
    tokens_allowed = max_tokens if premium else min(120, max_tokens)
    # Empty for premium users, so it can be appended unconditionally below
    # (the original re-tested `premium` at the yield, which was redundant).
    upgrade_msg = (
        ""
        if premium
        else "\n\n⭐ Unlock longer stories and advanced features by upgrading through our Paywalls.ai payment portal! ⭐"
    )

    client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")

    # OpenAI-style message list: system prompt, then prior turns, then the new user turn.
    messages = [{"role": "system", "content": system_message}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    for partial in client.chat_completion(
        messages,
        max_tokens=tokens_allowed,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        choices = partial.choices
        # Some stream chunks carry no content delta (e.g. role/finish chunks).
        if len(choices) and choices[0].delta.content:
            response += choices[0].delta.content
        yield response + upgrade_msg
63
 
64
# --- Gradio Interface
# Chat UI wired to `respond`; the sliders/textbox below show up in the
# "Additional inputs" accordion and are passed positionally to `respond`
# after (message, history). The OAuth token is injected by Gradio.
chatbot = gr.ChatInterface(
    respond,
    type="messages",  # history is a list of {"role", "content"} dicts
    additional_inputs=[
        gr.Textbox(value="You are a creative writing assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)
75
 
 
78
  gr.LoginButton()
79
  chatbot.render()
80
 
 
81
# Script entry point: start the Gradio server when run directly.
if __name__ == "__main__":
    demo.launch()