SanaAdeel commited on
Commit
5a9deb5
·
verified ·
1 Parent(s): 9baab51

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +58 -49
app.py CHANGED
@@ -1,72 +1,81 @@
1
  import os
2
  import gradio as gr
3
  import requests
4
- from huggingface_hub import InferenceClient
5
 
6
  # Load secrets
7
  PAYWALLS_API_KEY = os.environ.get("PAYWALLS_API_KEY")
8
  PAYWALLS_API_URL = os.environ.get("PAYWALLS_API_URL", "https://api.paywalls.ai/v1")
 
9
 
10
def check_paywall(user_id: str, feature_name: str = "premium_generation") -> tuple:
    """Ask the Paywalls access endpoint whether *user_id* may use *feature_name*.

    Returns a ``(access_granted, message)`` tuple. On any failure (network
    error, bad JSON, ...) access is denied with an empty message so the
    caller can fall back to the free tier.
    """
    auth_header = {"Authorization": f"Bearer {PAYWALLS_API_KEY}"}
    request_body = {"user_id": user_id, "feature": feature_name}
    try:
        resp = requests.post(
            f"{PAYWALLS_API_URL}/access",
            json=request_body,
            headers=auth_header,
            timeout=10,
        )
        data = resp.json()
        # The API message may carry upgrade / top-up links for the user.
        return data.get("access_granted", False), data.get("message", "")
    except Exception as e:
        print(f"[Paywall] Error: {e}")
        return False, ""
28
 
29
# Streaming respond function for the Gradio chat UI.
def respond(
    message, history, system_message, max_tokens, temperature, top_p, hf_token
):
    """Stream a chat completion, capping tokens for non-premium users.

    Args:
        message: latest user message.
        history: prior turns as a list of {"role", "content"} dicts.
        system_message: system prompt text.
        max_tokens / temperature / top_p: sampling controls from the UI.
        hf_token: Gradio-supplied token object; ``.token`` is the HF access
            token (presumably ``gr.OAuthToken`` — TODO confirm).

    Yields the accumulated response text after each streamed chunk; for
    non-premium users the paywall message (with its links) is appended.
    """
    user_id = "demo_user"
    premium, paywall_msg = check_paywall(user_id)

    # Non-premium users get a reduced token budget.
    tokens_allowed = max_tokens if premium else min(120, max_tokens)
    # (removed unused local `upgrade_prompt` from the original)

    # If not premium, surface the API's top-up/sign-in message (expected to
    # include clickable links) alongside the model output.
    if not premium and paywall_msg:
        prompt_message = paywall_msg
    else:
        prompt_message = ""

    # Hugging Face inference client for the fixed demo model.
    client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")

    messages = [{"role": "system", "content": system_message}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    for partial in client.chat_completion(
        messages,
        max_tokens=tokens_allowed,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        choices = partial.choices
        token = ""
        if len(choices) and choices[0].delta.content:
            token = choices[0].delta.content
        response += token
        # prompt_message is "" for premium users, so this only annotates
        # the free tier.
        yield response + prompt_message
68
 
69
- # Gradio interface
70
  chatbot = gr.ChatInterface(
71
  respond,
72
  type="messages",
 
1
  import os
2
  import gradio as gr
3
  import requests
4
+ import json
5
 
6
  # Load secrets
7
  PAYWALLS_API_KEY = os.environ.get("PAYWALLS_API_KEY")
8
  PAYWALLS_API_URL = os.environ.get("PAYWALLS_API_URL", "https://api.paywalls.ai/v1")
9
+ HF_MODEL = "openai/gpt-oss-20b" # Change as needed
10
 
11
# Check if the user is connected and authorized on the Paywalls service.
def check_paywall(user_id: str):
    """Return ``(connected, message)`` for *user_id*.

    ``connected`` is True when the Paywalls API reports the user as
    connected; otherwise ``message`` carries a human-readable prompt with
    the connect/top-up URL. Any error (network, HTTP status, bad JSON)
    yields ``(False, <error message>)``.
    """
    headers = {
        "Authorization": f"Bearer {PAYWALLS_API_KEY}",
        "X-Paywall-User": user_id,
    }
    try:
        # Pass user_id via params= so it is URL-encoded; the original
        # f-string interpolation broke for ids containing '&', '#', spaces…
        resp = requests.get(
            f"{PAYWALLS_API_URL}/user/connect",
            params={"user": user_id},
            headers=headers,
            timeout=10,
        )
        resp.raise_for_status()
        data = resp.json()
        if data.get("connected"):
            return True, ""
        # Provide the URL where the user can connect their account or top up.
        message = f"Please connect or top up to access. Visit: {data.get('url', 'No URL provided')}"
        return False, message
    except Exception as e:
        print(f"[Paywall] Error checking connection: {e}")
        return False, "Error checking paywall connection."
30
 
31
# Perform a chat completion request via the Paywalls proxy endpoint.
def chat_completion(user_id, system_message, history, user_message, max_tokens, temperature, top_p):
    """Send one chat turn through the Paywalls /chat/completions proxy.

    Args:
        user_id: billing identity forwarded in the X-Paywall-User header
            and the OpenAI-style ``user`` field.
        system_message: system prompt text.
        history: prior turns as a list of {"role", "content"} dicts.
        user_message: latest user message.
        max_tokens / temperature / top_p: sampling controls.

    Returns the assistant's reply text, or a human-readable error string
    (this function never raises — errors are logged and reported inline).
    """
    headers = {
        "Authorization": f"Bearer {PAYWALLS_API_KEY}",
        "X-Paywall-User": user_id,
    }

    messages = (
        [{"role": "system", "content": system_message}]
        + history
        + [{"role": "user", "content": user_message}]
    )
    payload = {
        "model": HF_MODEL,
        "messages": messages,
        "stream": False,
        "user": user_id,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
    }

    try:
        # json= serializes the payload and sets Content-Type:
        # application/json, replacing the manual json.dumps + header.
        response = requests.post(
            f"{PAYWALLS_API_URL}/chat/completions",
            headers=headers,
            json=payload,
            timeout=60,
        )
        response.raise_for_status()
        data = response.json()
        choices = data.get("choices", [])
        if choices:
            return choices[0]["message"]["content"]
        return "No response from model."
    except Exception as e:
        print(f"[Paywall] Chat completion error: {e}")
        return f"Error retrieving completion: {e}"
62
# Full respond function used by the Gradio chatbot interface.
def respond(message, history, system_message, max_tokens, temperature, top_p, hf_token):
    """Handle one chat turn for gr.ChatInterface.

    Returns the assistant's reply as a string: ``gr.ChatInterface`` appends
    the reply to the history itself, so the original
    ``return (history, msg)`` tuple would have been rendered as a
    stringified tuple in the chat window.

    ``hf_token`` is unused (inference goes through the Paywalls proxy) but
    kept so the interface wiring below stays unchanged.
    """
    user_id = "demo_user"  # Replace or dynamically assign per user session
    connected, paywall_msg = check_paywall(user_id)

    if not connected:
        # User not connected: surface the connection URL / error message.
        return paywall_msg

    # User connected — forward the turn to the Paywalls chat completions
    # endpoint and return the reply text.
    return chat_completion(
        user_id, system_message, history, message, max_tokens, temperature, top_p
    )
 
 
 
 
 
 
 
 
 
 
 
 
77
 
78
+ # Gradio interface setup
79
  chatbot = gr.ChatInterface(
80
  respond,
81
  type="messages",