Sharyar committed on
Commit
daa1f7e
·
1 Parent(s): 41871dd

Switch to router.huggingface.co OpenAI-compatible chat API for LLM completions

Browse files
Files changed (1) hide show
  1. app.py +22 -26
app.py CHANGED
@@ -1,15 +1,16 @@
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
- from huggingface_hub.utils import HfHubHTTPError
4
  import os
 
5
 
6
  # Use Inference API with LLM only
7
  MODEL_ID = "meta-llama/Llama-3.2-3B-Instruct"
8
  token = os.getenv("HUGGING_FACE_HUB_TOKEN")
 
9
 
10
  def improve_description(user_input: str) -> str:
11
  """
12
- Rewrites a rough project description using LLM.
13
  """
14
  if not user_input or not user_input.strip():
15
  return "Please provide a description to improve."
@@ -21,31 +22,26 @@ def improve_description(user_input: str) -> str:
21
  if not token:
22
  return "Error: HUGGING_FACE_HUB_TOKEN environment variable is not set. Please configure your Hugging Face token."
23
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  try:
25
- # Initialize client with model and token - library handles endpoint automatically
26
- client = InferenceClient(model=MODEL_ID, token=token)
27
- messages = [
28
- {
29
- "role": "system",
30
- "content": "You are a technical writing assistant. Rewrite project descriptions to be clear, professional, and concise while preserving all technical details."
31
- },
32
- {
33
- "role": "user",
34
- "content": f"Rewrite this project description professionally:\n\n{user_text}"
35
- }
36
- ]
37
-
38
- response = client.chat.completions.create(
39
- messages=messages,
40
- max_tokens=300,
41
- temperature=0.3,
42
- )
43
- improved_text = response.choices[0].message.content.strip()
44
-
45
  return improved_text if improved_text else "Error: No response generated."
46
-
47
- except HfHubHTTPError as e:
48
- return f"Error: API request failed - {str(e)}"
49
  except Exception as e:
50
  return f"Error: An unexpected error occurred - {str(e)}"
51
 
 
1
+
2
  import gradio as gr
 
 
3
  import os
4
+ import requests
5
 
6
  # Use Inference API with LLM only
7
  MODEL_ID = "meta-llama/Llama-3.2-3B-Instruct"
8
  token = os.getenv("HUGGING_FACE_HUB_TOKEN")
9
+ ROUTER_URL = "https://router.huggingface.co/v1/chat/completions"
10
 
11
  def improve_description(user_input: str) -> str:
12
  """
13
+ Rewrites a rough project description using LLM via router.huggingface.co OpenAI-compatible API.
14
  """
15
  if not user_input or not user_input.strip():
16
  return "Please provide a description to improve."
 
22
  if not token:
23
  return "Error: HUGGING_FACE_HUB_TOKEN environment variable is not set. Please configure your Hugging Face token."
24
 
25
+ headers = {
26
+ "Authorization": f"Bearer {token}",
27
+ "Content-Type": "application/json"
28
+ }
29
+ payload = {
30
+ "model": MODEL_ID,
31
+ "messages": [
32
+ {"role": "system", "content": "You are a technical writing assistant. Rewrite project descriptions to be clear, professional, and concise while preserving all technical details."},
33
+ {"role": "user", "content": f"Rewrite this project description professionally:\n\n{user_text}"}
34
+ ],
35
+ "max_tokens": 300,
36
+ "temperature": 0.3
37
+ }
38
  try:
39
+ resp = requests.post(ROUTER_URL, headers=headers, json=payload, timeout=60)
40
+ if resp.status_code != 200:
41
+ return f"Error: API request failed - {resp.status_code} {resp.reason}: {resp.text}"
42
+ data = resp.json()
43
+ improved_text = data["choices"][0]["message"]["content"].strip()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  return improved_text if improved_text else "Error: No response generated."
 
 
 
45
  except Exception as e:
46
  return f"Error: An unexpected error occurred - {str(e)}"
47