# binapp.py — Bineric AI dashboard (Gradio)
# (web-page scrape header removed: author avatar line, commit message, commit hash)
import gradio as gr
import requests
import json
import os
from typing import List, Dict, Optional
class BinericAPI:
    """Thin client for the Bineric REST API (balance + chat completions).

    Reads the API key from the ``Key`` environment variable (the Gradio
    "Secrets" tab) and sends it in an ``api-key`` header on every request.
    """

    # Hard cap on how long a single HTTP request may take; without a timeout
    # a stalled request would hang the Gradio worker indefinitely.
    REQUEST_TIMEOUT = 30

    def __init__(self):
        self.api_key = os.environ.get("Key")
        # Treat the placeholder value the same as a missing key.
        if not self.api_key or self.api_key == "YOUR_API_KEY":
            raise ValueError("API Key not found. Please add your API key in the 'Secrets' tab (Key = 'YOUR_API_KEY')")
        self.base_url = "https://api.bineric.com/api/v1"
        self.headers = {'api-key': self.api_key}

    def get_balance(self):
        """Fetch balance from Bineric API.

        Returns:
            dict: the decoded JSON balance payload, or ``{"error": ...}``
            on any network/HTTP failure (callers check for the key).
        """
        try:
            response = requests.get(
                f'{self.base_url}/monitoring/balance',
                headers=self.headers,
                timeout=self.REQUEST_TIMEOUT
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            return {"error": f"API request failed: {str(e)}"}

    def chat_completion(self, model: str, messages: List[Dict],
                        stream: bool = False, temperature: float = 0.7,
                        max_response_length: Optional[int] = None,
                        top_p: float = 0.95):
        """Get chat completion from various AI models.

        Args:
            model: model identifier (e.g. ``"gpt-5"``), sent verbatim.
            messages: OpenAI-style list of ``{"role": ..., "content": ...}``.
            stream: forwarded in the ``options`` object (no local streaming).
            temperature: sampling temperature.
            max_response_length: optional response-length cap; omitted from
                the payload when falsy (``None`` or ``0``).
            top_p: nucleus-sampling parameter.

        Returns:
            dict: the decoded JSON completion, or ``{"error": ...}`` on
            any network/HTTP failure.
        """
        try:
            payload = {
                "model": model,
                "messages": messages,
                "options": {
                    "stream": stream,
                    "temperature": temperature,
                    "top_p": top_p
                }
            }
            # Only include the length cap when the caller asked for one.
            if max_response_length:
                payload["options"]["max_response_length"] = max_response_length
            response = requests.post(
                f'{self.base_url}/ai/chat/completions',
                headers=self.headers,
                json=payload,
                timeout=self.REQUEST_TIMEOUT
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            return {"error": f"Chat completion failed: {str(e)}"}
# Build the shared API client once at import time. When no key is configured
# we fall back to None so the UI can show a setup message instead of crashing.
try:
    api_client = BinericAPI()
except ValueError:
    api_client = None
# Available models organized by provider.
# Keys are provider display names (used by the "Filter by Provider" dropdown);
# values are model identifiers sent verbatim as the "model" field of the
# chat-completion payload. Availability depends on the API key's entitlements.
MODELS = {
    "OpenAI": [
        "gpt-5",
        "gpt-5-nano",
        "gpt-4",
        "o1",
        "o3-mini",
        "gpt-oss-120b",
        "gpt-oss-20b"
    ],
    "Google": [
        "gemini-2.0-flash",
        "gemini-2.5-flash",
        "gemini-2.5-pro",
        "gemini-3-pro-preview"
    ],
    "Anthropic": [
        "claude-3.7-sonnet",
        "claude-sonnet-4",
        "claude-haiku-4-5"
    ],
    "DeepSeek": [
        "deepseek-v3.1-terminus",
        "deepseek-r1-0528"
    ],
    "Meta": [
        "meta-llama-3.1-8b",
        "meta-llama-3.1-405b",
        "meta-llama-3.3-70b"
    ],
    # Miscellaneous / multi-vendor models exposed by the gateway.
    "Other": [
        "norskgpt",
        "qwen-3-coder-480b",
        "qwen-3-235b",
        "mistral_large_24_02",
        "llama4-maverick",
        "llama4-scout"
    ]
}
def get_balance():
    """Fetch the account balance via the shared client and format it for the UI."""
    if not api_client:
        return "❌ API Key not found. Please add your API key in the 'Secrets' tab (Key = 'YOUR_API_KEY')"
    data = api_client.get_balance()
    if "error" in data:
        return f"❌ Error: {data['error']}"
    # Pretty-print the raw JSON so the textbox is readable.
    pretty = json.dumps(data, indent=2)
    return f"✅ Balance retrieved successfully:\n\n{pretty}"
def chat_complete(model, system_prompt, user_message, temperature, max_tokens, top_p, history=None):
    """Send one chat turn to the selected model and append it to the history.

    Args:
        model: model identifier forwarded to the API.
        system_prompt: optional system message (``None``/blank is skipped).
        user_message: the user's new message.
        temperature, max_tokens, top_p: sampling parameters; ``max_tokens <= 0``
            means "unlimited" and omits the cap from the request.
        history: list of ``{"user": ..., "assistant": ...}`` dicts, or ``None``.

    Returns:
        tuple: ``("", updated_history, markdown_status_text)`` — shaped to
        feed the (input box, state, info panel) Gradio outputs directly.
    """
    if not api_client:
        return "", history, "❌ API Key not found. Please add your API key in the 'Secrets' tab"
    # Fix: guard blank submissions instead of burning an API call on them,
    # and tolerate a None system_prompt (original crashed on .strip()).
    if not (user_message or "").strip():
        return "", history, "❌ Please enter a message first"
    messages = []
    if system_prompt and system_prompt.strip():
        messages.append({"role": "system", "content": system_prompt})
    # Replay prior turns so the model sees the full conversation.
    for entry in history or []:
        messages.append({"role": "user", "content": entry["user"]})
        if entry["assistant"]:
            messages.append({"role": "assistant", "content": entry["assistant"]})
    messages.append({"role": "user", "content": user_message})
    result = api_client.chat_completion(
        model=model,
        messages=messages,
        temperature=temperature,
        max_response_length=max_tokens if max_tokens > 0 else None,
        top_p=top_p
    )
    if "error" in result:
        return "", history, f"❌ Error: {result['error']}"
    choices = result.get("choices") or []
    if not choices:
        return "", history, "❌ No response generated from the model"
    response_text = choices[0].get("message", {}).get("content", "No response generated")
    # Record the completed turn (only after a successful response).
    if history is None:
        history = []
    history.append({
        "user": user_message,
        "assistant": response_text
    })
    # Surface token accounting when the API reports it.
    usage_info = ""
    if "usage" in result:
        usage = result["usage"]
        usage_info = f"\n\n**Usage:**\n- Prompt tokens: {usage.get('prompt_tokens', 'N/A')}\n- Completion tokens: {usage.get('completion_tokens', 'N/A')}\n- Total tokens: {usage.get('total_tokens', 'N/A')}"
    return "", history, response_text + usage_info
def clear_chat():
    """Reset the conversation: empty history, empty input box, empty info panel."""
    fresh_history = []
    cleared_input = ""
    cleared_info = ""
    return fresh_history, cleared_input, cleared_info
# Create Gradio interface.
# Fix: theme= belongs on gr.Blocks(...) — launch() does not accept it.
with gr.Blocks(title="Bineric AI Dashboard", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 Bineric AI Dashboard")
    gr.Markdown("Balance checking and AI chat completion using Bineric API")

    with gr.Tabs():
        # Tab 1: Balance checker
        with gr.Tab("💰 Balance"):
            gr.Markdown("### Check your API balance")
            with gr.Accordion("ℹ️ Setup Instructions", open=False):
                gr.Markdown("""
1. **Add your API key in Gradio Secrets:**
   - Go to the "Secrets" tab
   - Add a new secret with:
     - Key: `Key`
     - Value: `YOUR_API_KEY_HERE`
2. **Click the button below to fetch balance**
""")
            balance_output = gr.Textbox(
                label="Balance Information",
                placeholder="Click 'Get Balance' to see your balance...",
                lines=15
            )
            with gr.Row():
                get_balance_btn = gr.Button("🔄 Get Balance", variant="primary")
                clear_balance_btn = gr.Button("🗑️ Clear")
            get_balance_btn.click(get_balance, outputs=balance_output)
            clear_balance_btn.click(lambda: "", outputs=balance_output)

        # Tab 2: Chat completion
        with gr.Tab("💬 AI Chat"):
            gr.Markdown("### Chat with various AI models")
            with gr.Row():
                # Left column: settings and input
                with gr.Column(scale=1):
                    model_selector = gr.Dropdown(
                        label="Select AI Model",
                        choices=[model for models in MODELS.values() for model in models],
                        value="gpt-5",
                        interactive=True
                    )
                    provider_filter = gr.Dropdown(
                        label="Filter by Provider",
                        choices=list(MODELS.keys()),
                        value="OpenAI",
                        interactive=True
                    )
                    system_prompt = gr.Textbox(
                        label="System Prompt (optional)",
                        placeholder="You are a helpful assistant...",
                        lines=3
                    )
                    with gr.Accordion("⚙️ Advanced Parameters", open=False):
                        temperature = gr.Slider(
                            label="Temperature",
                            minimum=0.0,
                            maximum=2.0,
                            value=0.7,
                            step=0.1
                        )
                        max_tokens = gr.Slider(
                            label="Max Tokens (0 = unlimited)",
                            minimum=0,
                            maximum=10000,
                            value=0,
                            step=100
                        )
                        top_p = gr.Slider(
                            label="Top-p",
                            minimum=0.0,
                            maximum=1.0,
                            value=0.95,
                            step=0.05
                        )

                # Right column: chat interface
                with gr.Column(scale=2):
                    chatbot = gr.Chatbot(
                        label="Conversation",
                        height=400
                    )
                    # Hidden state: list of {"user": ..., "assistant": ...} dicts
                    # (the format chat_complete produces/consumes).
                    chat_state = gr.State([])
                    user_input = gr.Textbox(
                        label="Your Message",
                        placeholder="Type your message here...",
                        lines=3
                    )
                    response_info = gr.Markdown("")
                    with gr.Row():
                        send_btn = gr.Button("📤 Send", variant="primary")
                        clear_chat_btn = gr.Button("🗑️ Clear Chat")
                    with gr.Row():
                        gr.Markdown("**Tip:** Press Shift+Enter for new line, Enter to send")

            # Keep the model dropdown in sync with the provider filter.
            def update_model_list(provider):
                return gr.Dropdown(choices=MODELS[provider], value=MODELS[provider][0])

            provider_filter.change(
                update_model_list,
                inputs=provider_filter,
                outputs=model_selector
            )

            # Fix: the Chatbot widget was never wired to any event output, so
            # the conversation was invisible. Mirror the dict-based history
            # into [user, assistant] pairs for gr.Chatbot (tuples format —
            # confirm against the installed Gradio version if it uses
            # messages-format chatbots by default).
            def send_message(model, system, message, temp, tokens, top, history):
                cleared_input, new_history, info = chat_complete(
                    model, system, message, temp, tokens, top, history
                )
                pairs = [[e["user"], e["assistant"]] for e in (new_history or [])]
                return cleared_input, pairs, new_history, info

            send_btn.click(
                send_message,
                inputs=[model_selector, system_prompt, user_input, temperature, max_tokens, top_p, chat_state],
                outputs=[user_input, chatbot, chat_state, response_info]
            )
            # Allow Enter to send.
            user_input.submit(
                send_message,
                inputs=[model_selector, system_prompt, user_input, temperature, max_tokens, top_p, chat_state],
                outputs=[user_input, chatbot, chat_state, response_info]
            )

            # Fix: also clear the visible Chatbot, which clear_chat() alone
            # does not cover.
            def reset_chat():
                state, box, info = clear_chat()
                return [], state, box, info

            clear_chat_btn.click(
                reset_chat,
                outputs=[chatbot, chat_state, user_input, response_info]
            )

        # Tab 3: Model information
        with gr.Tab("📚 Model Info"):
            gr.Markdown("### Available AI Models")
            for provider, models in MODELS.items():
                with gr.Accordion(f"🔹 {provider}", open=False):
                    model_table = []
                    for model in models:
                        model_table.append(f"- **{model}**")
                    gr.Markdown("\n".join(model_table))
            gr.Markdown("""
---
**API Documentation:**
- All models use the same chat completion endpoint
- Request format: JSON with messages array
- Support for system prompts and conversation history
- Token usage tracking available in responses
""")

    # Footer
    gr.Markdown("""
---
**Note:** This dashboard uses the Bineric API. Make sure your API key has access to the models you want to use.
Balance information is fetched from the monitoring endpoint.
""")
# Launch the app.
if __name__ == "__main__":
    # Fix: Blocks.launch() has no `theme` parameter (it raises TypeError on
    # modern Gradio) — themes are set on the gr.Blocks(...) constructor.
    demo.launch(share=True)