huijio committed on
Commit
c5e4874
·
verified ·
1 Parent(s): f57e751

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -27
app.py CHANGED
@@ -1,11 +1,11 @@
1
  import gradio as gr
2
  import requests
3
  from html import escape
4
- from typing import List, Tuple, Optional
5
 
6
  # API endpoints
7
- API_URL = "https://aham2api-3.onrender.com/v1/chat/completions"
8
- MODELS_URL = "https://aham2api-3.onrender.com/v1/models"
9
 
10
  # CSS for styling
11
  css = """
@@ -15,7 +15,7 @@ css = """
15
  def get_available_models() -> List[str]:
16
  """Fetch available models from the API."""
17
  try:
18
- response = requests.get(MODELS_URL)
19
  response.raise_for_status()
20
  models_data = response.json()
21
  return sorted([model['id'] for model in models_data.get('data', [])])
@@ -24,7 +24,6 @@ def get_available_models() -> List[str]:
24
  return [
25
  "samura-deepseek-r1",
26
  "samura-gpt-4o",
27
- "samura-claude-3-5-sonnet",
28
  "groq-llama3-70b-8192"
29
  ]
30
 
@@ -39,10 +38,11 @@ def chat_completion(
39
  "messages": messages,
40
  "temperature": 0.7,
41
  "max_tokens": 2000,
 
42
  }
43
 
44
  try:
45
- response = requests.post(API_URL, headers=headers, json=data)
46
  response.raise_for_status()
47
  return response.json()
48
  except Exception as e:
@@ -52,19 +52,6 @@ def format_message(text: str) -> str:
52
  """Format message with HTML for display."""
53
  return escape(text).replace("\n", "<br>")
54
 
55
- def convert_chat_history_to_messages(chat_history: List[Tuple[str, str]]) -> List[dict]:
56
- """Convert Gradio chat history to API message format."""
57
- messages = []
58
- for user_msg, assistant_msg in chat_history:
59
- # Remove HTML formatting for the API
60
- clean_user_msg = user_msg.replace("<br>", "\n")
61
- messages.append({"role": "user", "content": clean_user_msg})
62
-
63
- if assistant_msg: # Only add assistant response if it exists
64
- clean_assistant_msg = assistant_msg.replace("<br>", "\n")
65
- messages.append({"role": "assistant", "content": clean_assistant_msg})
66
- return messages
67
-
68
  def chat_interface(
69
  message: str,
70
  chat_history: List[Tuple[str, str]],
@@ -74,13 +61,26 @@ def chat_interface(
74
  if not message.strip():
75
  return chat_history, ""
76
 
77
- # Convert the entire chat history to API format
78
- messages = convert_chat_history_to_messages(chat_history)
 
 
 
 
 
 
 
 
 
 
79
 
80
- # Add the current message
81
- messages.append({"role": "user", "content": message})
 
 
 
82
 
83
- # Get the API response
84
  response = chat_completion(messages=messages, model=model)
85
 
86
  if "error" in response:
@@ -88,7 +88,7 @@ def chat_interface(
88
  else:
89
  assistant_message = response.get("choices", [{}])[0].get("message", {}).get("content", "No response")
90
 
91
- # Update chat history with the new exchange
92
  chat_history.append((format_message(message), format_message(assistant_message)))
93
  return chat_history, ""
94
 
@@ -97,7 +97,7 @@ def create_interface():
97
  available_models = get_available_models()
98
 
99
  with gr.Blocks(css=css, theme=gr.themes.Default()) as demo:
100
- gr.Markdown("# Aham2 API Chat Interface")
101
 
102
  model_dropdown = gr.Dropdown(
103
  choices=available_models,
@@ -105,7 +105,11 @@ def create_interface():
105
  label="Select Model"
106
  )
107
 
108
- chatbot = gr.Chatbot(elem_classes=["chatbot"], bubble_full_width=False)
 
 
 
 
109
 
110
  with gr.Row():
111
  message = gr.Textbox(
 
1
  import gradio as gr
2
  import requests
3
  from html import escape
4
+ from typing import List, Tuple
5
 
6
  # API endpoints
7
+ API_URL = "http://localhost:3000/v1/chat/completions" # Update with your API URL
8
+ MODELS_URL = "http://localhost:3000/v1/models" # Update with your API URL
9
 
10
  # CSS for styling
11
  css = """
 
15
  def get_available_models() -> List[str]:
16
  """Fetch available models from the API."""
17
  try:
18
+ response = requests.get(MODELS_URL, timeout=30)
19
  response.raise_for_status()
20
  models_data = response.json()
21
  return sorted([model['id'] for model in models_data.get('data', [])])
 
24
  return [
25
  "samura-deepseek-r1",
26
  "samura-gpt-4o",
 
27
  "groq-llama3-70b-8192"
28
  ]
29
 
 
38
  "messages": messages,
39
  "temperature": 0.7,
40
  "max_tokens": 2000,
41
+ "stream": False
42
  }
43
 
44
  try:
45
+ response = requests.post(API_URL, headers=headers, json=data, timeout=30)
46
  response.raise_for_status()
47
  return response.json()
48
  except Exception as e:
 
52
  """Format message with HTML for display."""
53
  return escape(text).replace("\n", "<br>")
54
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  def chat_interface(
56
  message: str,
57
  chat_history: List[Tuple[str, str]],
 
61
  if not message.strip():
62
  return chat_history, ""
63
 
64
+ # Convert chat history to API format
65
+ messages = []
66
+ for user_msg, assistant_msg in chat_history:
67
+ messages.append({
68
+ "role": "user",
69
+ "content": user_msg.replace("<br>", "\n")
70
+ })
71
+ if assistant_msg:
72
+ messages.append({
73
+ "role": "assistant",
74
+ "content": assistant_msg.replace("<br>", "\n")
75
+ })
76
 
77
+ # Add current message
78
+ messages.append({
79
+ "role": "user",
80
+ "content": message
81
+ })
82
 
83
+ # Get API response
84
  response = chat_completion(messages=messages, model=model)
85
 
86
  if "error" in response:
 
88
  else:
89
  assistant_message = response.get("choices", [{}])[0].get("message", {}).get("content", "No response")
90
 
91
+ # Update chat history
92
  chat_history.append((format_message(message), format_message(assistant_message)))
93
  return chat_history, ""
94
 
 
97
  available_models = get_available_models()
98
 
99
  with gr.Blocks(css=css, theme=gr.themes.Default()) as demo:
100
+ gr.Markdown("# API Chat Interface")
101
 
102
  model_dropdown = gr.Dropdown(
103
  choices=available_models,
 
105
  label="Select Model"
106
  )
107
 
108
+ chatbot = gr.Chatbot(
109
+ elem_classes=["chatbot"],
110
+ bubble_full_width=False,
111
+ height=500
112
+ )
113
 
114
  with gr.Row():
115
  message = gr.Textbox(