# MySafeCode's picture
# Update binapp2.py
# 988cf16 verified
import gradio as gr
import requests
import json
import os
from typing import List, Dict, Optional
class BinericAPI:
    """Thin client for the Bineric REST API (balance lookup + chat completion)."""

    def __init__(self):
        # The key is supplied via the environment (e.g. a hosted "Secrets" tab).
        self.api_key = os.environ.get("Key")
        if not self.api_key or self.api_key == "YOUR_API_KEY":
            raise ValueError("API Key not found. Please add your API key in the 'Secrets' tab (Key = 'YOUR_API_KEY')")
        self.base_url = "https://api.bineric.com"
        self.headers = {
            'api-key': self.api_key,
            'Content-Type': 'application/json'
        }

    def get_balance(self):
        """Fetch balance from Bineric API.

        Returns the decoded JSON body on success, or a
        ``{"success": False, "error": ...}`` dict on any request failure.
        """
        try:
            response = requests.get(
                f'{self.base_url}/api/v1/monitoring/balance',
                headers=self.headers,
                timeout=30  # fix: no timeout meant a dead endpoint hung the UI forever
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            return {"success": False, "error": f"API request failed: {str(e)}"}

    def chat_completion(self, model: str, messages: List[Dict],
                        stream: bool = False, temperature: float = 0.7,
                        max_tokens: Optional[int] = None,
                        top_p: float = 0.95):
        """Get chat completion from various AI models.

        The exact chat path is undocumented, so several candidate endpoints
        are probed in order; the first 200 response body is returned.
        On total failure returns ``{"success": False, "error": ...}``
        describing the most recent failure.
        """
        # Try multiple possible endpoint patterns
        endpoints_to_try = [
            f'{self.base_url}/api/v1/ai/chat/completions',  # Most likely based on balance endpoint
            f'{self.base_url}/api/ai/chat/completions',
            f'{self.base_url}/api/v1/chat/completions',
            f'{self.base_url}/v1/chat/completions',
            f'{self.base_url}/chat/completions',
            f'{self.base_url}/api/chat/completions',
        ]
        # Prepare request payload
        payload = {
            "model": model,
            "messages": messages,
            "options": {
                "temperature": temperature,
                "top_p": top_p
            }
        }
        if max_tokens:
            payload["options"]["max_response_length"] = max_tokens
        if stream:
            payload["options"]["stream"] = stream
        print(f"\n=== Trying chat completion with model: {model} ===")
        print(f"Messages: {len(messages)}")
        last_error = None
        last_response = None
        for endpoint in endpoints_to_try:
            try:
                print(f"\nTrying endpoint: {endpoint}")
                print(f"Payload keys: {list(payload.keys())}")
                response = requests.post(
                    endpoint,
                    headers=self.headers,
                    json=payload,
                    timeout=30
                )
                print(f"Status code: {response.status_code}")
                if response.status_code == 200:
                    result = response.json()
                    print(f"Success! Response keys: {list(result.keys())}")
                    return result
                elif response.status_code == 404:
                    print(f"Endpoint not found: {endpoint}")
                    last_response = response
                    continue
                else:
                    # Try to get error message; the body may not be JSON.
                    try:
                        error_data = response.json()
                        print(f"Error response: {json.dumps(error_data, indent=2)}")
                    except ValueError:  # fix: was a bare except
                        print(f"Error text: {response.text[:200]}")
                    last_response = response
            except requests.exceptions.RequestException as e:
                print(f"Request failed: {str(e)}")
                last_error = e
                continue
        # All endpoints failed — summarize the most recent failure.
        error_msg = "All endpoints failed. "
        # fix: must compare against None — a requests.Response is falsy for
        # 4xx/5xx statuses (exactly what gets stored here), so the original
        # `if last_response:` made this diagnostics branch unreachable.
        if last_response is not None:
            error_msg += f"Last status: {last_response.status_code}. "
            try:
                error_data = last_response.json()
                error_msg += f"Response: {json.dumps(error_data)}"
            except ValueError:  # fix: was a bare except
                error_msg += f"Response text: {last_response.text[:200]}"
        elif last_error:
            error_msg += f"Last error: {str(last_error)}"
        return {"success": False, "error": error_msg}
# Initialize API client once at import time. A missing/placeholder key
# leaves api_client as None and each UI handler reports the problem.
try:
    api_client = BinericAPI()
except ValueError as e:
    # fix: was a silent swallow — surface the reason in the logs so
    # operators can tell why every request will report "API Key not found".
    print(f"⚠️ Bineric API client not initialized: {e}")
    api_client = None
# Available models - simplified list for testing
# NOTE(review): model ids are hard-coded; confirm against Bineric's current
# model catalog — an unknown id will surface as an API error at request time.
MODELS = [
    "gemini-2.0-flash",
    "gpt-5",
    "gpt-4",
    "claude-haiku-4-5",
    "deepseek-v3.1-terminus"
]
def get_balance():
    """Fetch the account balance and render it as a display string."""
    # Without a configured client there is nothing to query.
    if not api_client:
        return "❌ API Key not found. Please add your API key in the 'Secrets' tab (Key = 'YOUR_API_KEY')"
    data = api_client.get_balance()
    # A missing "success" key counts as success; only an explicit False fails.
    if data.get("success", True):
        pretty = json.dumps(data, indent=2)
        return f"✅ Balance retrieved successfully:\n\n{pretty}"
    return f"❌ Error: {data.get('error', 'Unknown error')}"
def test_endpoints():
    """Fire one tiny chat request so every candidate endpoint gets probed."""
    if not api_client:
        return "❌ API Key not found"
    # Single-turn probe message; the model/params are deliberately cheap.
    probe = [{"role": "user", "content": "Hello, please respond with just 'Test successful'."}]
    outcome = api_client.chat_completion(
        model="gemini-2.0-flash",
        messages=probe,
        temperature=0.7,
        max_tokens=50
    )
    return json.dumps(outcome, indent=2)
def extract_response_text(result):
    """Pull the assistant text out of a completion result dict.

    The Bineric response schema is undocumented, so after the primary
    "response" field this tries several common field names, including
    OpenAI-style "choices". Returns "" when nothing recognizable is found.
    """
    text = result.get("response", "")
    if text:
        return text
    for field in ["content", "text", "output", "message", "choices"]:
        if field not in result:
            continue
        if field == "choices" and isinstance(result[field], list) and len(result[field]) > 0:
            choice = result[field][0]
            if "message" in choice and "content" in choice["message"]:
                return choice["message"]["content"]
            if "text" in choice:
                return choice["text"]
            # Unrecognized choice shape: keep scanning the remaining fields.
        else:
            return str(result[field])
    return ""


def format_usage_suffix(result):
    """Render token/cost usage info as a markdown suffix ("" if absent)."""
    if "usage" not in result:
        return ""
    usage = result["usage"]
    info = "\n\n📊 **Usage:**\n"
    if "prompt_tokens" in usage:
        info += f"- Prompt tokens: {usage['prompt_tokens']}\n"
    if "completion_tokens" in usage:
        info += f"- Completion tokens: {usage['completion_tokens']}\n"
    if "total_tokens" in usage:
        info += f"- Total tokens: {usage['total_tokens']}\n"
    if "total_cost" in usage:
        info += f"- Cost: ${usage['total_cost']:.6f}\n"
    return info


def chat_complete(model, user_message, temperature, max_tokens, top_p, history=None):
    """Generate chat completion - simplified version.

    Returns a 3-tuple (cleared_input, updated_history, display_text)
    shaped for Gradio outputs. Only the latest user turn is sent to the
    API; `history` is a client-side transcript, not model context.
    """
    if not api_client:
        return "", history, "❌ API Key not found. Please add your API key in the 'Secrets' tab"
    if not user_message.strip():
        return "", history, "❌ Please enter a message"
    # Prepare messages - simple single-turn format
    messages = [{"role": "user", "content": user_message}]
    # Get completion; a non-positive max_tokens falls back to 1000.
    result = api_client.chat_completion(
        model=model,
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens if max_tokens > 0 else 1000,
        top_p=top_p
    )
    # Missing "success" key is treated as success; only explicit False fails.
    if not result.get("success", True):
        error_msg = result.get("error", "Unknown error")
        # Try to provide helpful suggestions for endpoint problems.
        suggestions = ""
        if "404" in error_msg or "Not Found" in error_msg:
            suggestions = "\n\n🔍 **Possible solutions:**\n"
            suggestions += "1. The chat endpoint might be different\n"
            suggestions += "2. Check the Bineric API documentation\n"
            suggestions += "3. Try a different model\n"
            suggestions += "4. Contact support for the correct endpoint"
        return "", history, f"❌ Error: {error_msg}{suggestions}"
    response_text = extract_response_text(result)
    if not response_text:
        response_text = "⚠️ Received response but could not parse format\n"
        response_text += f"Raw response keys: {list(result.keys())}"
    # Update history (avoid mutating a shared default).
    if history is None:
        history = []
    history.append({
        "user": user_message,
        "assistant": response_text[:2000]  # Limit length
    })
    usage_info = format_usage_suffix(result)
    return "", history, response_text + usage_info
def clear_chat():
    """Reset the conversation: empty history plus two blank text fields."""
    fresh_history = []
    return fresh_history, "", ""
# Create Gradio interface.
# Layout: three tabs (Balance, Chat Test, Debug) plus a status footer.
with gr.Blocks(title="Bineric AI Dashboard") as demo:
    gr.Markdown("# 🤖 Bineric AI Dashboard")
    gr.Markdown("Balance checking and AI chat completion")
    # Tabs for different functionalities
    with gr.Tabs():
        # Tab 1: Balance Checker
        with gr.Tab("💰 Balance"):
            gr.Markdown("### Check your API balance")
            balance_output = gr.Textbox(
                label="Balance Information",
                placeholder="Click 'Get Balance' to see your balance...",
                lines=10
            )
            with gr.Row():
                get_balance_btn = gr.Button("🔄 Get Balance", variant="primary")
                clear_balance_btn = gr.Button("🗑️ Clear")
            # Wire the buttons: fetch the balance / blank the textbox.
            get_balance_btn.click(get_balance, outputs=balance_output)
            clear_balance_btn.click(lambda: "", outputs=balance_output)
        # Tab 2: Chat
        with gr.Tab("💬 Chat Test"):
            gr.Markdown("### Test Chat Completion")
            with gr.Row():
                # Left column: model + message + tuning parameters
                with gr.Column(scale=1):
                    model_selector = gr.Dropdown(
                        label="Select Model",
                        choices=MODELS,
                        value="gemini-2.0-flash",
                        interactive=True
                    )
                    user_input = gr.Textbox(
                        label="Message",
                        placeholder="Type your message here...",
                        lines=3,
                        value="Hello, how are you?"
                    )
                    with gr.Accordion("Parameters", open=False):
                        # Positional Slider args are (minimum, maximum, value).
                        temperature = gr.Slider(0.0, 2.0, 0.7, step=0.1, label="Temperature")
                        max_tokens = gr.Slider(0, 4000, 500, step=50, label="Max Tokens")
                        top_p = gr.Slider(0.0, 1.0, 0.95, step=0.05, label="Top-p")
                    send_btn = gr.Button("🚀 Send Message", variant="primary")
                # Right column: the model's reply
                with gr.Column(scale=2):
                    response_output = gr.Textbox(
                        label="Response",
                        placeholder="Response will appear here...",
                        lines=15
                    )

            # Simple send function: stateless single-turn request that
            # returns only the display string (no history is kept —
            # unlike chat_complete above, which this UI does not use).
            def send_message(model, message, temp, tokens, top_p_val):
                if not api_client:
                    return "❌ API Key not found"
                if not message.strip():
                    return "❌ Please enter a message"
                messages = [{"role": "user", "content": message}]
                # A non-positive token slider value means "no explicit limit".
                result = api_client.chat_completion(
                    model=model,
                    messages=messages,
                    temperature=temp,
                    max_tokens=tokens if tokens > 0 else None,
                    top_p=top_p_val
                )
                # Missing "success" key counts as success; only explicit False fails.
                if not result.get("success", True):
                    return f"❌ Error: {result.get('error', 'Unknown error')}"
                # Extract response (only the primary "response" field here)
                response_text = result.get("response", "")
                if not response_text:
                    return f"⚠️ No response text. Raw result:\n{json.dumps(result, indent=2)}"
                # Add usage info when the API reports it
                if "usage" in result:
                    usage = result["usage"]
                    response_text += f"\n\n📊 Usage: {usage.get('total_tokens', '?')} tokens"
                    if "total_cost" in usage:
                        response_text += f", ${usage['total_cost']:.6f} cost"
                return response_text

            # Both the button and Enter-in-textbox trigger the same handler.
            send_btn.click(
                send_message,
                inputs=[model_selector, user_input, temperature, max_tokens, top_p],
                outputs=response_output
            )
            user_input.submit(
                send_message,
                inputs=[model_selector, user_input, temperature, max_tokens, top_p],
                outputs=response_output
            )
        # Tab 3: Endpoint Debug
        with gr.Tab("🔧 Debug"):
            gr.Markdown("### Endpoint Testing")
            with gr.Accordion("Test Endpoints", open=True):
                test_output = gr.Textbox(
                    label="Endpoint Test Results",
                    lines=20,
                    interactive=False
                )
                test_btn = gr.Button("🧪 Run Endpoint Tests", variant="primary")
                test_btn.click(test_endpoints, outputs=test_output)
            with gr.Accordion("Current Balance", open=False):
                debug_balance = gr.Textbox(
                    label="Balance",
                    lines=5,
                    interactive=False
                )
                refresh_btn = gr.Button("🔄 Refresh")
                refresh_btn.click(get_balance, outputs=debug_balance)
            # This list mirrors endpoints_to_try in BinericAPI.chat_completion;
            # keep the two in sync when editing either.
            gr.Markdown("""
**Endpoint patterns being tested:**
1. `/api/v1/ai/chat/completions`
2. `/api/ai/chat/completions`
3. `/api/v1/chat/completions`
4. `/v1/chat/completions`
5. `/chat/completions`
6. `/api/chat/completions`
**Note:** Check console/logs for detailed output
""")
    # Footer
    gr.Markdown("""
---
**Status:**
- ✅ Balance endpoint works: `/api/v1/monitoring/balance`
- 🔍 Testing chat completion endpoint...
- Check the Debug tab for endpoint testing results
""")
# Launch the app
if __name__ == "__main__":
    # NOTE(review): share=True requests a public Gradio tunnel link —
    # confirm this is intended when running outside a hosted Space.
    demo.launch(share=True)